diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 9ebca6a750f33b218341b6b9a708726f05890c36..5abe1cc9f0682128d5e3a648d449aa91015ea071 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -381,6 +381,7 @@ What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/spec_store_bypass /sys/devices/system/cpu/vulnerabilities/l1tf /sys/devices/system/cpu/vulnerabilities/mds + /sys/devices/system/cpu/vulnerabilities/srbds /sys/devices/system/cpu/vulnerabilities/tsx_async_abort /sys/devices/system/cpu/vulnerabilities/itlb_multihit Date: January 2018 diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index a67387006a0fc1152e75ae09c3dee64dccc73b66..f007cb671a47786b10e9ad1fbd9b1b2e1903b06c 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -333,6 +333,15 @@ Description: Give a way to attach REQ_META|FUA to data writes * 5 | 4 | 3 | 2 | 1 | 0 | * Cold | Warm | Hot | Cold | Warm | Hot | +What: /sys/fs/f2fs//node_io_flag +Date: June 2020 +Contact: "Jaegeuk Kim" +Description: Give a way to attach REQ_META|FUA to node writes + given temperature-based bits. Now the bits indicate: + * REQ_META | REQ_FUA | + * 5 | 4 | 3 | 2 | 1 | 0 | + * Cold | Warm | Hot | Cold | Warm | Hot | + What: /sys/fs/f2fs//iostat_period_ms Date: April 2020 Contact: "Daeho Jeong" diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst index 0795e3c2643f2946c3f1a58418bc6d28c376ec06..ca4dbdd9016d5a873b11381dbe75bdff5ee13038 100644 --- a/Documentation/admin-guide/hw-vuln/index.rst +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -14,3 +14,4 @@ are configurable at compile, boot or run time. mds tsx_async_abort multihit.rst + special-register-buffer-data-sampling.rst diff --git a/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst new file mode 100644 index 0000000000000000000000000000000000000000..47b1b3afac994beb278d4923e262a1da7bad5a23 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst @@ -0,0 +1,149 @@ +.. SPDX-License-Identifier: GPL-2.0 + +SRBDS - Special Register Buffer Data Sampling +============================================= + +SRBDS is a hardware vulnerability that allows MDS :doc:`mds` techniques to +infer values returned from special register accesses. Special register +accesses are accesses to off core registers. According to Intel's evaluation, +the special register reads that have a security expectation of privacy are +RDRAND, RDSEED and SGX EGETKEY. + +When RDRAND, RDSEED and EGETKEY instructions are used, the data is moved +to the core through the special register mechanism that is susceptible +to MDS attacks. + +Affected processors +-------------------- +Core models (desktop, mobile, Xeon-E3) that implement RDRAND and/or RDSEED may +be affected. + +A processor is affected by SRBDS if its Family_Model and stepping is +in the following list, with the exception of the listed processors +exporting MDS_NO while Intel TSX is available yet not enabled. The +latter class of processors are only affected when Intel TSX is enabled +by software using TSX_CTRL_MSR otherwise they are not affected. 
+ + ============= ============ ======== + common name Family_Model Stepping + ============= ============ ======== + IvyBridge 06_3AH All + + Haswell 06_3CH All + Haswell_L 06_45H All + Haswell_G 06_46H All + + Broadwell_G 06_47H All + Broadwell 06_3DH All + + Skylake_L 06_4EH All + Skylake 06_5EH All + + Kabylake_L 06_8EH <= 0xC + Kabylake 06_9EH <= 0xD + ============= ============ ======== + +Related CVEs +------------ + +The following CVE entry is related to this SRBDS issue: + + ============== ===== ===================================== + CVE-2020-0543 SRBDS Special Register Buffer Data Sampling + ============== ===== ===================================== + +Attack scenarios +---------------- +An unprivileged user can extract values returned from RDRAND and RDSEED +executed on another core or sibling thread using MDS techniques. + + +Mitigation mechanism +------------------- +Intel will release microcode updates that modify the RDRAND, RDSEED, and +EGETKEY instructions to overwrite secret special register data in the shared +staging buffer before the secret data can be accessed by another logical +processor. + +During execution of the RDRAND, RDSEED, or EGETKEY instructions, off-core +accesses from other logical processors will be delayed until the special +register read is complete and the secret data in the shared staging buffer is +overwritten. + +This has three effects on performance: + +#. RDRAND, RDSEED, or EGETKEY instructions have higher latency. + +#. Executing RDRAND at the same time on multiple logical processors will be + serialized, resulting in an overall reduction in the maximum RDRAND + bandwidth. + +#. Executing RDRAND, RDSEED or EGETKEY will delay memory accesses from other + logical processors that miss their core caches, with an impact similar to + legacy locked cache-line-split accesses. + +The microcode updates provide an opt-out mechanism (RNGDS_MITG_DIS) to disable +the mitigation for RDRAND and RDSEED instructions executed outside of Intel +Software Guard Extensions (Intel SGX) enclaves. On logical processors that +disable the mitigation using this opt-out mechanism, RDRAND and RDSEED do not +take longer to execute and do not impact performance of sibling logical +processors memory accesses. The opt-out mechanism does not affect Intel SGX +enclaves (including execution of RDRAND or RDSEED inside an enclave, as well +as EGETKEY execution). + +IA32_MCU_OPT_CTRL MSR Definition +-------------------------------- +Along with the mitigation for this issue, Intel added a new thread-scope +IA32_MCU_OPT_CTRL MSR, (address 0x123). The presence of this MSR and +RNGDS_MITG_DIS (bit 0) is enumerated by CPUID.(EAX=07H,ECX=0).EDX[SRBDS_CTRL = +9]==1. This MSR is introduced through the microcode update. + +Setting IA32_MCU_OPT_CTRL[0] (RNGDS_MITG_DIS) to 1 for a logical processor +disables the mitigation for RDRAND and RDSEED executed outside of an Intel SGX +enclave on that logical processor. Opting out of the mitigation for a +particular logical processor does not affect the RDRAND and RDSEED mitigations +for other logical processors. + +Note that inside of an Intel SGX enclave, the mitigation is applied regardless +of the value of RNGDS_MITG_DS. + +Mitigation control on the kernel command line +--------------------------------------------- +The kernel command line allows control over the SRBDS mitigation at boot time +with the option "srbds=". 
The option for this is: + + ============= ============================================================= + off This option disables SRBDS mitigation for RDRAND and RDSEED on + affected platforms. + ============= ============================================================= + +SRBDS System Information +----------------------- +The Linux kernel provides vulnerability status information through sysfs. For +SRBDS this can be accessed by the following sysfs file: +/sys/devices/system/cpu/vulnerabilities/srbds + +The possible values contained in this file are: + + ============================== ============================================= + Not affected Processor not vulnerable + Vulnerable Processor vulnerable and mitigation disabled + Vulnerable: No microcode Processor vulnerable and microcode is missing + mitigation + Mitigation: Microcode Processor is vulnerable and mitigation is in + effect. + Mitigation: TSX disabled Processor is only vulnerable when TSX is + enabled while this system was booted with TSX + disabled. + Unknown: Dependent on + hypervisor status Running on virtual guest processor that is + affected but with no way to know if host + processor is mitigated or vulnerable. + ============================== ============================================= + +SRBDS Default mitigation +------------------------ +This new microcode serializes processor access during execution of RDRAND, +RDSEED ensures that the shared buffer is overwritten before it is released for +reuse. Use the "srbds=off" kernel command line to disable the mitigation for +RDRAND and RDSEED. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index d545de46d7def45239ec5ea4d514b5c296a9fb4f..26a29f7723fee7b3a42fb433143fff695af3961a 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4271,6 +4271,26 @@ spia_pedr= spia_peddr= + srbds= [X86,INTEL] + Control the Special Register Buffer Data Sampling + (SRBDS) mitigation. + + Certain CPUs are vulnerable to an MDS-like + exploit which can leak bits from the random + number generator. + + By default, this issue is mitigated by + microcode. However, the microcode fix can cause + the RDRAND and RDSEED instructions to become + much slower. Among other effects, this will + result in reduced throughput from /dev/urandom. 
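(Editor's note, not part of the patch above: a minimal user-space sketch, assuming an x86-64 Linux host whose CPU implements RDRAND. It reads the /sys/devices/system/cpu/vulnerabilities/srbds file introduced by this series and then times a burst of RDRAND calls; running it on a kernel booted normally and again with "srbds=off" illustrates the latency and throughput cost that this kernel-parameters entry describes. The file name srbds_check.c and the iteration count are arbitrary choices for illustration.)

/* srbds_check.c - hedged sketch only; build with: gcc -O2 srbds_check.c -o srbds_check */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static inline int rdrand64(uint64_t *v)
{
	unsigned char ok;

	/* RDRAND sets CF on success; the recommended retry loop is omitted for brevity. */
	asm volatile("rdrand %0; setc %1" : "=r" (*v), "=qm" (ok));
	return ok;
}

int main(void)
{
	char status[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/srbds", "r");
	uint64_t val, acc = 0;
	struct timespec t0, t1;
	int i;

	/* Report the mitigation state exposed by the sysfs file added in this series. */
	if (f) {
		if (fgets(status, sizeof(status), f))
			printf("srbds: %s", status);
		fclose(f);
	}

	/* Time one million RDRAND executions; the mitigation increases their latency. */
	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (i = 0; i < 1000000; i++) {
		if (rdrand64(&val))
			acc ^= val;
	}
	clock_gettime(CLOCK_MONOTONIC, &t1);

	printf("1M RDRAND calls: %.3f ms (accumulator %llx)\n",
	       (t1.tv_sec - t0.tv_sec) * 1e3 +
	       (t1.tv_nsec - t0.tv_nsec) / 1e6,
	       (unsigned long long)acc);
	return 0;
}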
+ + The microcode mitigation can be disabled with + the following option: + + off: Disable mitigation and remove + performance impact to RDRAND and RDSEED + srcutree.counter_wrap_check [KNL] Specifies how frequently to check for grace-period sequence counter wrap for the diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt index 2888320983100a10b9278406b1d72225d2f54306..6ab9d8a49f5b80430b1e7ef2cb82bb9db3e39fdf 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm.txt @@ -314,3 +314,4 @@ compatible = "qcom,trinket-iot" compatible = "qcom,trinketp-iot" compatible = "qcom,trinket-iot-idp" compatible = "qcom,trinketp-iot-idp" +compatible = "qcom,qcs610-idp" diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt index b6a7e7397b8b47ef4d250fd6f8dba971ee23dc00..b944fe06718852cb11dadff33b3e789f47d6fa87 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt @@ -16,6 +16,9 @@ Required properties: Documentation/devicetree/bindings/graph.txt. This port should be connected to the input port of an attached HDMI or LVDS encoder chip. +Optional properties: +- pinctrl-names: Contain "default" and "sleep". + Example: dpi0: dpi@1401d000 { @@ -26,6 +29,9 @@ dpi0: dpi@1401d000 { <&mmsys CLK_MM_DPI_ENGINE>, <&apmixedsys CLK_APMIXED_TVDPLL>; clock-names = "pixel", "engine", "pll"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&dpi_pin_func>; + pinctrl-1 = <&dpi_pin_idle>; port { dpi0_out: endpoint { diff --git a/Documentation/devicetree/bindings/drm/msm/sde-dp-sim.txt b/Documentation/devicetree/bindings/drm/msm/sde-dp-sim.txt index f24aadeec64cd9b6351eb1657146369cadc14bb3..0af3e24fe83bdcaf90bd1134c0fa365e03c7db3d 100644 --- a/Documentation/devicetree/bindings/drm/msm/sde-dp-sim.txt +++ b/Documentation/devicetree/bindings/drm/msm/sde-dp-sim.txt @@ -3,7 +3,34 @@ QTI Snapdragon Display Engine (SDE) DP-MST sideband message emulation driver Required properties: - compatible: "qcom,dp-mst-sim" +Optional properties: +- qcom,dpcd-max-rate: A u32 property defines dpcd reg 0x1. +- qcom,dpcd-max-lane: A u32 property defines dpcd reg 0x2. +- qcom,dpcd-mst: A u32 property defines dpcd reg 0x21. +- qcom,dpcd-regs: A array property defines dpcd reg addr + and val. +- qcom,voltage-swing: A u32 property that overwrite dpcd reg 0x103~0x106. +- qcom,pre-emphasis: A u32 property that overwrite dpcd reg 0x103~0x106. +- qcom,link-training-cnt: A u32 property defines physical link training cnt + before switching to simulation mode. +- qcom,skip-edid: A boolean property defines if edid will read from + internal edid buffer. +- qcom,skip-dpcd-read: A boolean property defines if dpcd will read from + internal dpcd buffer. +- qcom,skip-link-training: A boolean property defines if link training will + be skipped. +- qcom,skip-dpcd-write: A boolean property defines if dpcd write will be + ignored. +- qcom,skip-hpd: A boolean property defines if HPD will be ignored + and always set high. +- qcom,skip-mst: A boolean property defines if MST sideband message + need to be simulated. + Each child node represents a port at root branch, with properties: +- qcom,edid: A u8 array property defines EDID raw data. Once + defined, below properties will be ignored. 
For any + mode with horizontal width > 4095, only EDID raw + data format is supported. - qcom,mode-h-active: A u32 property defines the horizontal active size. - qcom,mode-h-front-porch: A u32 property defines the horizontal front porch. - qcom,mode-h-pulse-width: A u32 property defines the horizontal pulse. diff --git a/Documentation/devicetree/bindings/drm/msm/sde-dp.txt b/Documentation/devicetree/bindings/drm/msm/sde-dp.txt index b04b5b97f343057bcff4265d055195c50e0bcf11..34f152fce612e579ba39a3f618847031d9bb73bc 100644 --- a/Documentation/devicetree/bindings/drm/msm/sde-dp.txt +++ b/Documentation/devicetree/bindings/drm/msm/sde-dp.txt @@ -121,6 +121,11 @@ Optional properties: - qcom,dp-aux-bridge: phandle for dp aux bridge module, for 3rd party dp bridge only. - qcom,dp-aux-bridge-sim: phandle for dp aux bridge module, for internal mst debug simulation only. - qcom,dp-force-bond-mode: Ignore tile information from EDID and force DP to work in bond mode. +- qcom,dp-force-connect-mode: Boolean to specify if dp is in always-connected mode, only set + when qcom,dp-aux-bridge is defined. Once set, dp will always + report connected state to user. When HPD is high, mode from sink + will be reported. When HPD is low, mode from dp-aux-bridge + simulator will be reported. - qcom,bond-dual-ctrl: u32 array to specify the cell-index of the two DP controllers that support bond mode. The first controller in the array is the bond master. Driver will switch to bond mode if both DP controllers are connected to the same dual DP input monitor. diff --git a/Documentation/devicetree/bindings/net/qcom,stmmac-ethqos.txt b/Documentation/devicetree/bindings/net/qcom,stmmac-ethqos.txt index 7011dff0b7d9fd01eb51df18be353a42abca75a2..94dbe4b7077ae70d32bad7ec32ca736a552c89f1 100644 --- a/Documentation/devicetree/bindings/net/qcom,stmmac-ethqos.txt +++ b/Documentation/devicetree/bindings/net/qcom,stmmac-ethqos.txt @@ -116,6 +116,14 @@ Optional properties: - snps,priority: TX queue priority (Range: 0x0 to 0xF) - rx-prog-swap: boolean value to enable RX_PROG_SWAP for 10/100M - rx-dll-bypass: boolean value to indicate RX DLL in bypass mode. 
+- qcom,qoe_mode: set 1 to enable qmi over ethernet support +- qcom,qoe-queue: qmi over ethernet receiver traffic queue +- qcom,qoe-vlan-offset: qmi over ethernet vlan filterin register offset +- qcom,cv2x_mode: enable cv2x over ethernet: + set 1 for driver running in MDM device + set 2 for driver running in APQ device +- qcom,cv2x-queue: cv2x over ethernet receiver traffic queue +- qcom,cv2x-vlan-offset: cv2x over ethernet vlan filterin register offset Examples: stmmac_axi_setup: stmmac-axi-config { diff --git a/Documentation/devicetree/bindings/sound/qcom,hsi2s.txt b/Documentation/devicetree/bindings/sound/qcom,hsi2s.txt index 8771004e81c960f1d9e1fe2a1568aa145bbfe55e..11b94015a3d125d03c464d8dd3e6385fede2451a 100644 --- a/Documentation/devicetree/bindings/sound/qcom,hsi2s.txt +++ b/Documentation/devicetree/bindings/sound/qcom,hsi2s.txt @@ -16,6 +16,8 @@ Required properties: - interrupts : Interrupt number used by this interface - clocks : Core clocks used by this interface - clock-names : Clock names for each core clock + - iommus: The phandle and stream IDs for the SMMU used by this root + - qcom,iova-mapping: Specifies the start address and size of iova space Optional properties: @@ -26,6 +28,7 @@ Optional properties: rate detectors - rate-detector-interfaces : Specifies the minor number of the interfaces to have rate detection enabled + - qcom,smmu-s1-bypass: Boolean, if present S1 bypass is enabled * HS-I2S interface nodes @@ -40,14 +43,11 @@ Required properties: - clock-names : Clock name for the interface clock - pinctrl-names : Pinctrl state names for each pin group configuration - pinctrl-x : Defines pinctrl state for each pin group - - iommus: The phandle and stream IDs for the SMMU used by this root - - qcom,iova-mapping: Specifies the start address and size of iova space - bit-clock-hz : Default bit clock frequency in hertz - data-buffer-ms : Default periodic interrupt interval in milliseconds Optional properties: - - qcom,smmu-s1-bypass: Boolean, if present S1 bypass is enabled - bit-depth : Bit depth of the I2S data Default - 32 - spkr-channel-count : Number of speaker channels @@ -125,6 +125,9 @@ hsi2s: qcom,hsi2s { "csr_hclk"; number-of-rate-detectors = <2>; rate-detector-interfaces = <0 1>; + iommus = <&apps_smmu 0x035C 0x1>; + qcom,smmu-s1-bypass; + qcom,iova-mapping = <0x0 0xFFFFFFFF>; sdr0: qcom,hs0_i2s { compatible = "qcom,hsi2s-interface"; @@ -136,9 +139,6 @@ hsi2s: qcom,hsi2s { &hs0_i2s_data1_sleep>; clocks = <&clock_gcc GCC_SDR_PRI_MI2S_CLK>; clock-names = "pri_mi2s_clk"; - iommus = <&apps_smmu 0x035C 0x0>; - qcom,smmu-s1-bypass; - qcom,iova-mapping = <0x0 0xFFFFFFFF>; bit-clock-hz = <12288000>; data-buffer-ms = <10>; bit-depth = <32>; @@ -168,9 +168,6 @@ hsi2s: qcom,hsi2s { &hs1_i2s_data1_sleep>; clocks = <&clock_gcc GCC_SDR_SEC_MI2S_CLK>; clock-names = "sec_mi2s_clk"; - iommus = <&apps_smmu 0x035D 0x0>; - qcom,smmu-s1-bypass; - qcom,iova-mapping = <0x0 0xFFFFFFFF>; bit-clock-hz = <12288000>; data-buffer-ms = <10>; bit-depth = <32>; diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt index 31b47d5853bf379e562f0f1cf0f949ce856e9078..04fa17ae4a976f628ee8f3816d7b8397e189f8d0 100644 --- a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt +++ b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt @@ -29,6 +29,8 @@ Optional properties: - qcom,rt: Specifies if the framework worker thread for this controller device should have "real-time" priority. 
- qcom,disable-autosuspend: Specifies to disable runtime PM auto suspend. +- qcom,set-miso-sampling: Specifies to set miso sampling rate. +- qcom,miso-sampling-ctrl-val: Specifies the value of miso sampling rate. SPI slave nodes must be children of the SPI master node and can contain the following properties. diff --git a/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt b/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt index e558027e9c916e606cf16727de43a3de2834250d..32e1690af102f546c4148c95e3e2e3eb78be5e17 100644 --- a/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt +++ b/Documentation/devicetree/bindings/thermal/qti-qmi-sensor.txt @@ -57,6 +57,12 @@ Subsystem properties: 24. sys_therm1 25. sys_therm2 26. modem_tsens1 + 27. qfe_ret_pa0_fr1 + 28. qfe_wtr_pa0_fr1 + 29. qfe_wtr_pa1_fr1 + 30. qfe_wtr_pa2_fr1 + 31. qfe_wtr_pa3_fr1 + Example: diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt index 9b1404294606edacfc4939473af4cf48b4286c78..512eb7ce1d780c5b3738e7e86e00f9eafd08569c 100644 --- a/Documentation/devicetree/bindings/usb/dwc3.txt +++ b/Documentation/devicetree/bindings/usb/dwc3.txt @@ -50,6 +50,8 @@ Optional properties: during HS transmit. - snps,ssp-u3-u0-quirk: when set, core always changes PHY power state to P2, before attempting a U3 exit handshake. + - snps,parkmode-disable-ss-quirk: when set, all SuperSpeed bus instances in + park mode are disabled. - snps,dis_metastability_quirk: when set, disable metastability workaround. CAUTION: use only if you are absolutely sure of it. - snps,is-utmi-l1-suspend: true when DWC3 asserts output signal diff --git a/Documentation/driver-api/mtdnand.rst b/Documentation/driver-api/mtdnand.rst index 2a5191b6d4459fa76734bcdbad1f21e30278dcd9..15449334b1242dc19b508d504f6ce65bbdead5d3 100644 --- a/Documentation/driver-api/mtdnand.rst +++ b/Documentation/driver-api/mtdnand.rst @@ -277,7 +277,7 @@ unregisters the partitions in the MTD layer. static void __exit board_cleanup (void) { /* Release resources, unregister device */ - nand_release (board_mtd); + nand_release (mtd_to_nand(board_mtd)); /* unmap physical address */ iounmap(baseaddr); diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt index 1ca9556ec849d41f31b0e05c5de026b4e38921d3..76603ab3851b6f5863c57881b1b3709f291a2ce6 100644 --- a/Documentation/filesystems/f2fs.txt +++ b/Documentation/filesystems/f2fs.txt @@ -217,8 +217,12 @@ fsync_mode=%s Control the policy of fsync. Currently supports "posix", pass, but the performance will regress. "nobarrier" is based on "posix", but doesn't issue flush command for non-atomic files likewise "nobarrier" mount option. -test_dummy_encryption Enable dummy encryption, which provides a fake fscrypt +test_dummy_encryption +test_dummy_encryption=%s + Enable dummy encryption, which provides a fake fscrypt context. The fake fscrypt context is used by xfstests. + The argument may be either "v1" or "v2", in order to + select the corresponding fscrypt policy version. checkpoint=%s[:%u[%]] Set to "disable" to turn off checkpointing. Set to "enable" to reenable checkpointing. Is enabled by default. 
While disabled, any unmounting or unexpected shutdowns will cause diff --git a/Documentation/usb/raw-gadget.rst b/Documentation/usb/raw-gadget.rst index 9e78cb858f861b7ca52b987fcaed9942f3c2610c..68d879a8009ecef069d46c9a241210483b83ef0a 100644 --- a/Documentation/usb/raw-gadget.rst +++ b/Documentation/usb/raw-gadget.rst @@ -27,9 +27,8 @@ differences are: 3. Raw Gadget provides a way to select a UDC device/driver to bind to, while GadgetFS currently binds to the first available UDC. -4. Raw Gadget uses predictable endpoint names (handles) across different - UDCs (as long as UDCs have enough endpoints of each required transfer - type). +4. Raw Gadget explicitly exposes information about endpoints addresses and + capabilities allowing a user to write UDC-agnostic gadgets. 5. Raw Gadget has ioctl-based interface instead of a filesystem-based one. @@ -50,12 +49,36 @@ The typical usage of Raw Gadget looks like: Raw Gadget and react to those depending on what kind of USB device needs to be emulated. +Note, that some UDC drivers have fixed addresses assigned to endpoints, and +therefore arbitrary endpoint addresses can't be used in the descriptors. +Nevertheles, Raw Gadget provides a UDC-agnostic way to write USB gadgets. +Once a USB_RAW_EVENT_CONNECT event is received via USB_RAW_IOCTL_EVENT_FETCH, +the USB_RAW_IOCTL_EPS_INFO ioctl can be used to find out information about +endpoints that the UDC driver has. Based on that information, the user must +chose UDC endpoints that will be used for the gadget being emulated, and +properly assign addresses in endpoint descriptors. + +You can find usage examples (along with a test suite) here: + +https://github.com/xairy/raw-gadget + +Internal details +~~~~~~~~~~~~~~~~ + +Currently every endpoint read/write ioctl submits a USB request and waits until +its completion. This is the desired mode for coverage-guided fuzzing (as we'd +like all USB request processing happen during the lifetime of a syscall), +and must be kept in the implementation. (This might be slow for real world +applications, thus the O_NONBLOCK improvement suggestion below.) + Potential future improvements ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Implement ioctl's for setting/clearing halt status on endpoints. - -- Reporting more events (suspend, resume, etc.) through - USB_RAW_IOCTL_EVENT_FETCH. +- Report more events (suspend, resume, etc.) through USB_RAW_IOCTL_EVENT_FETCH. - Support O_NONBLOCK I/O. + +- Support USB 3 features (accept SS endpoint companion descriptor when + enabling endpoints; allow providing stream_id for bulk transfers). + +- Support ISO transfer features (expose frame_number for completed requests). diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index f67ed33d105438166220c44874562700aa8b8770..81a8802cea887a13880b1db55271a8bf50178fae 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -3737,9 +3737,11 @@ EOI was received. 
#define KVM_EXIT_HYPERV_SYNIC 1 #define KVM_EXIT_HYPERV_HCALL 2 __u32 type; + __u32 pad1; union { struct { __u32 msr; + __u32 pad2; __u64 control; __u64 evt_page; __u64 msg_page; diff --git a/Makefile b/Makefile index a75d57b348d9db88015b1b6c83ff8a96e17c1ed9..f0d146bece61b08efdf45a908828823f5f216b95 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 180 +SUBLEVEL = 190 EXTRAVERSION = NAME = Petit Gorille @@ -504,7 +504,7 @@ ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_FLAGS)), y) $(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?") endif GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) -CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) +CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) endif ifneq ($(GCC_TOOLCHAIN),) @@ -571,12 +571,8 @@ KBUILD_MODULES := KBUILD_BUILTIN := 1 # If we have only "make modules", don't compile built-in objects. -# When we're building modules with modversions, we need to consider -# the built-in objects during the descend as well, in order to -# make sure the checksums are up to date before we record them. - ifeq ($(MAKECMDGOALS),modules) - KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) + KBUILD_BUILTIN := endif # If we have "make modules", compile modules @@ -704,20 +700,14 @@ KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias) ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE -KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,) -else -ifdef CONFIG_PROFILE_ALL_BRANCHES -KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,) +KBUILD_CFLAGS += -Os else KBUILD_CFLAGS += -O2 endif -endif - -KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \ - $(call cc-disable-warning,maybe-uninitialized,)) # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) +KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races) # check for 'asm goto' ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) @@ -940,6 +930,17 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) # disable stringop warnings in gcc 8+ KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) +# We'll want to enable this eventually, but it's not going away for 5.7 at least +KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds) +KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) +KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow) + +# Another good warning that we'll want to enable eventually +KBUILD_CFLAGS += $(call cc-disable-warning, restrict) + +# Enabled with W=2, disabled by default as noisy +KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized) + # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) @@ -1327,12 +1328,17 @@ endif # needs to be updated, so this check is forced on all builds uts_len := 64 +ifneq (,$(BUILD_NUMBER)) + UTS_RELEASE=$(KERNELRELEASE)-ab$(BUILD_NUMBER) +else + UTS_RELEASE=$(KERNELRELEASE) +endif define filechk_utsrelease.h - if [ `echo -n "$(KERNELRELEASE)" | wc -c ` -gt $(uts_len) ]; then \ - echo '"$(KERNELRELEASE)" exceeds $(uts_len) characters' >&2; \ - exit 1; \ - fi; \ - (echo \#define UTS_RELEASE \"$(KERNELRELEASE)\";) + if [ `echo -n 
"$(UTS_RELEASE)" | wc -c ` -gt $(uts_len) ]; then \ + echo '"$(UTS_RELEASE)" exceeds $(uts_len) characters' >&2; \ + exit 1; \ + fi; \ + (echo \#define UTS_RELEASE \"$(UTS_RELEASE)\";) endef define filechk_version.h @@ -1422,6 +1428,13 @@ ifdef CONFIG_MODULES all: modules +# When we're building modules with modversions, we need to consider +# the built-in objects during the descend as well, in order to +# make sure the checksums are up to date before we record them. +ifdef CONFIG_MODVERSIONS + KBUILD_BUILTIN := 1 +endif + # Build modules # # A module can be listed more than once in obj-m resulting in diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 87d8c4f0307d11539c15df2aa8dace09656a6560..7295967b502812cd2282c316e7e6f9c9e6ddce46 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -30,11 +30,13 @@ * Address valid if: * - "addr" doesn't have any high-bits set * - AND "size" doesn't have any high-bits set - * - AND "addr+size" doesn't have any high-bits set + * - AND "addr+size-(size != 0)" doesn't have any high-bits set * - OR we are in kernel mode. */ -#define __access_ok(addr, size) \ - ((get_fs().seg & (addr | size | (addr+size))) == 0) +#define __access_ok(addr, size) ({ \ + unsigned long __ao_a = (addr), __ao_b = (size); \ + unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \ + (get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; }) #define access_ok(type, addr, size) \ ({ \ diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h index aa2d6da9d187be21b1f38ac116f40941bb5dc39d..12c74e826530011b37382691fc91e2cb0b0cf2ba 100644 --- a/arch/arc/include/asm/elf.h +++ b/arch/arc/include/asm/elf.h @@ -26,7 +26,7 @@ #define R_ARC_32_PCREL 0x31 /*to set parameters in the core dumps */ -#define ELF_ARCH EM_ARCOMPACT +#define ELF_ARCH EM_ARC_INUSE #define ELF_CLASS ELFCLASS32 #ifdef CONFIG_CPU_BIG_ENDIAN diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index 85d9ea4a0acccc937a1cf51b29fc0480afa4010d..705a6820842329942f97e62def3677a677b3a494 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -156,7 +156,6 @@ END(EV_Extension) tracesys: ; save EFA in case tracer wants the PC of traced task ; using ERET won't work since next-PC has already committed - lr r12, [efa] GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11 st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address @@ -199,15 +198,9 @@ tracesys_exit: ; Breakpoint TRAP ; --------------------------------------------- trap_with_param: - - ; stop_pc info by gdb needs this info - lr r0, [efa] + mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc mov r1, sp - ; Now that we have read EFA, it is safe to do "fake" rtie - ; and get out of CPU exception mode - FAKE_RET_FROM_EXCPN - ; Save callee regs in case gdb wants to have a look ; SP will grow up by size of CALLEE Reg-File ; NOTE: clobbers r12 @@ -234,6 +227,10 @@ ENTRY(EV_Trap) EXCEPTION_PROLOGUE + lr r12, [efa] + + FAKE_RET_FROM_EXCPN + ;============ TRAP 1 :breakpoints ; Check ECR for trap with arg (PROLOGUE ensures r9 has ECR) bmsk.f 0, r9, 7 @@ -241,9 +238,6 @@ ENTRY(EV_Trap) ;============ TRAP (no param): syscall top level - ; First return from Exception to pure K mode (Exception/IRQs renabled) - FAKE_RET_FROM_EXCPN - ; If syscall tracing ongoing, invoke pre-post-hooks GET_CURR_THR_INFO_FLAGS r10 btst r10, TIF_SYSCALL_TRACE diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 6b8d106e0d53f78bbe684d1e09ff9e05db274748..11c2c4a3fe691f5c50e534d68dc9f8fd1281d670 100644 --- 
a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -355,12 +356,12 @@ static void arc_chk_core_config(void) if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr) panic("Linux built with incorrect DCCM Base address\n"); - if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz) + if (CONFIG_ARC_DCCM_SZ * SZ_1K != cpu->dccm.sz) panic("Linux built with incorrect DCCM Size\n"); #endif #ifdef CONFIG_ARC_HAS_ICCM - if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz) + if (CONFIG_ARC_ICCM_SZ * SZ_1K != cpu->iccm.sz) panic("Linux built with incorrect ICCM Size\n"); #endif diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig index ce908e2c5282434db3643ec54674544b533877d2..71378bfec8d087e8acdb20d2bf4ca6db7aaeba0c 100644 --- a/arch/arc/plat-eznps/Kconfig +++ b/arch/arc/plat-eznps/Kconfig @@ -6,6 +6,7 @@ menuconfig ARC_PLAT_EZNPS bool "\"EZchip\" ARC dev platform" + depends on ISA_ARCOMPACT select CPU_BIG_ENDIAN select CLKSRC_NPS if !PHYS_ADDR_T_64BIT select EZNPS_GIC diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index 1792192001a22da9e563963d7c715e019d5accde..e975f9cabe84b6a1deedf6b23df3a7ad5942aee1 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -249,10 +249,10 @@ status = "disabled"; }; - mailbox: mailbox@25000 { + mailbox: mailbox@25c00 { compatible = "brcm,iproc-fa2-mbox"; - reg = <0x25000 0x445>; - interrupts = ; + reg = <0x25c00 0x400>; + interrupts = ; #mbox-cells = <1>; brcm,rx-status-len = <32>; brcm,use-bcm-hdr; diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts index e5f2cca86f04483c428869ee1e82ae65373d684b..120776d45441b7c09cea6eeefed46a898cc25eb6 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts @@ -25,7 +25,7 @@ leds { act { - gpios = <&gpio 47 GPIO_ACTIVE_HIGH>; + gpios = <&gpio 47 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index f271c564d57d74f8e585f94662c9144468c5e13d..0bfd932fa5e5418c66787c8c276e4bcb077d1a7a 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -289,6 +289,7 @@ #address-cells = <1>; ranges = <0x51000000 0x51000000 0x3000 0x0 0x20000000 0x10000000>; + dma-ranges; /** * To enable PCI endpoint mode, disable the pcie1_rc * node and enable pcie1_ep mode. 
@@ -303,7 +304,6 @@ device_type = "pci"; ranges = <0x81000000 0 0 0x03000 0 0x00010000 0x82000000 0 0x20013000 0x13000 0 0xffed000>; - dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>; bus-range = <0x00 0xff>; #interrupt-cells = <1>; num-lanes = <1>; @@ -347,6 +347,7 @@ #address-cells = <1>; ranges = <0x51800000 0x51800000 0x3000 0x0 0x30000000 0x10000000>; + dma-ranges; status = "disabled"; pcie@51800000 { compatible = "ti,dra7-pcie"; @@ -358,7 +359,6 @@ device_type = "pci"; ranges = <0x81000000 0 0 0x03000 0 0x00010000 0x82000000 0 0x30013000 0x13000 0 0xffed000>; - dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>; bus-range = <0x00 0xff>; #interrupt-cells = <1>; num-lanes = <1>; diff --git a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts index bfd4946cf9fe7ebc62f867a4f9a9dc774aabfd78..8b63b6593d3af42a5826ddcbef59d7298e6f85a4 100644 --- a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts +++ b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts @@ -81,8 +81,8 @@ imx27-phycard-s-rdk { pinctrl_i2c1: i2c1grp { fsl,pins = < - MX27_PAD_I2C2_SDA__I2C2_SDA 0x0 - MX27_PAD_I2C2_SCL__I2C2_SCL 0x0 + MX27_PAD_I2C_DATA__I2C_DATA 0x0 + MX27_PAD_I2C_CLK__I2C_CLK 0x0 >; }; diff --git a/arch/arm/boot/dts/imx6q-b450v3.dts b/arch/arm/boot/dts/imx6q-b450v3.dts index 404a93d9596ba6ed04510a49dd25a7bd5961f0e1..dc7d65da7d01d72e0aadeaa9bc610a4d575dc827 100644 --- a/arch/arm/boot/dts/imx6q-b450v3.dts +++ b/arch/arm/boot/dts/imx6q-b450v3.dts @@ -65,13 +65,6 @@ }; }; -&clks { - assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, - <&clks IMX6QDL_CLK_LDB_DI1_SEL>; - assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>, - <&clks IMX6QDL_CLK_PLL3_USB_OTG>; -}; - &ldb { status = "okay"; diff --git a/arch/arm/boot/dts/imx6q-b650v3.dts b/arch/arm/boot/dts/imx6q-b650v3.dts index 7f9f176901d4c5842fbf35ed33cf61e7192222ca..101d61f93070e5f7f651cc1dde9c86f23902f479 100644 --- a/arch/arm/boot/dts/imx6q-b650v3.dts +++ b/arch/arm/boot/dts/imx6q-b650v3.dts @@ -65,13 +65,6 @@ }; }; -&clks { - assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, - <&clks IMX6QDL_CLK_LDB_DI1_SEL>; - assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>, - <&clks IMX6QDL_CLK_PLL3_USB_OTG>; -}; - &ldb { status = "okay"; diff --git a/arch/arm/boot/dts/imx6q-b850v3.dts b/arch/arm/boot/dts/imx6q-b850v3.dts index 46bdc67227157cfd146986d0237abdf4c5cf2226..8fc831dc31564bcb61657842362a93e749cd7ce3 100644 --- a/arch/arm/boot/dts/imx6q-b850v3.dts +++ b/arch/arm/boot/dts/imx6q-b850v3.dts @@ -53,17 +53,6 @@ }; }; -&clks { - assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, - <&clks IMX6QDL_CLK_LDB_DI1_SEL>, - <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>, - <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>; - assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, - <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, - <&clks IMX6QDL_CLK_PLL2_PFD2_396M>, - <&clks IMX6QDL_CLK_PLL2_PFD2_396M>; -}; - &ldb { fsl,dual-channel; status = "okay"; diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi index 1015e55ca8f7bd9eaa2edf68ed499f577fbac083..f3c2c55876166984533f1e363579ac4416843c43 100644 --- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi +++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi @@ -92,6 +92,56 @@ mux-int-port = <1>; mux-ext-port = <4>; }; + + aliases { + mdio-gpio0 = &mdio0; + }; + + mdio0: mdio-gpio { + compatible = "virtual,mdio-gpio"; + gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>, /* mdc */ + <&gpio2 7 GPIO_ACTIVE_HIGH>; /* mdio */ + + #address-cells = <1>; + #size-cells = <0>; + + 
switch@0 { + compatible = "marvell,mv88e6085"; /* 88e6240*/ + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + switch_ports: ports { + #address-cells = <1>; + #size-cells = <0>; + }; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + switchphy0: switchphy@0 { + reg = <0>; + }; + + switchphy1: switchphy@1 { + reg = <1>; + }; + + switchphy2: switchphy@2 { + reg = <2>; + }; + + switchphy3: switchphy@3 { + reg = <3>; + }; + + switchphy4: switchphy@4 { + reg = <4>; + }; + }; + }; + }; }; &ecspi5 { @@ -326,3 +376,30 @@ tcxo-clock-frequency = <26000000>; }; }; + +&pcie { + /* Synopsys, Inc. Device */ + pci_root: root@0,0 { + compatible = "pci16c3,abcd"; + reg = <0x00000000 0 0 0 0>; + + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + }; +}; + +&clks { + assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, + <&clks IMX6QDL_CLK_LDB_DI1_SEL>, + <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>, + <&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>, + <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>, + <&clks IMX6QDL_CLK_IPU2_DI1_PRE_SEL>; + assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, + <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, + <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, + <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, + <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, + <&clks IMX6QDL_CLK_PLL2_PFD0_352M>; +}; diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi index bcced922b2807a123497bd0dcb0526aa517b6dde..b4779b0ece96d3b486bd005128bd081064852b2c 100644 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi +++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi @@ -16,8 +16,10 @@ #interrupt-cells = <2>; #address-cells = <1>; #size-cells = <0>; - spi-max-frequency = <3000000>; + spi-max-frequency = <9600000>; spi-cs-high; + spi-cpol; + spi-cpha; cpcap_adc: adc { compatible = "motorola,mapphone-cpcap-adc"; diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi index 310222634570d98f19cbad69b3473ba6d0e20798..2e4fce897814f8819dd4de92bce88b90186d8718 100644 --- a/arch/arm/boot/dts/r8a73a4.dtsi +++ b/arch/arm/boot/dts/r8a73a4.dtsi @@ -133,7 +133,14 @@ cmt1: timer@e6130000 { compatible = "renesas,cmt-48-r8a73a4", "renesas,cmt-48-gen2"; reg = <0 0xe6130000 0 0x1004>; - interrupts = ; + interrupts = , + , + , + , + , + , + , + ; clocks = <&mstp3_clks R8A73A4_CLK_CMT1>; clock-names = "fck"; power-domains = <&pd_c5>; diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi index d37d22682a632035f79a3fc1b2f7634e2d90e8b0..9a120628af0ca6465434b6dcab41a128a9a2ab8f 100644 --- a/arch/arm/boot/dts/r8a7740.dtsi +++ b/arch/arm/boot/dts/r8a7740.dtsi @@ -467,7 +467,7 @@ cpg_clocks: cpg_clocks@e6150000 { compatible = "renesas,r8a7740-cpg-clocks"; reg = <0xe6150000 0x10000>; - clocks = <&extal1_clk>, <&extalr_clk>; + clocks = <&extal1_clk>, <&extal2_clk>, <&extalr_clk>; #clock-cells = <1>; clock-output-names = "system", "pllc0", "pllc1", "pllc2", "r", diff --git a/arch/arm/boot/dts/rk3228-evb.dts b/arch/arm/boot/dts/rk3228-evb.dts index 1be9daacc4f91448ad9f467376922f065385ab53..b69c842d830626df4317dd9c30128f8f5e084c6f 100644 --- a/arch/arm/boot/dts/rk3228-evb.dts +++ b/arch/arm/boot/dts/rk3228-evb.dts @@ -84,7 +84,7 @@ #address-cells = <1>; #size-cells = <0>; - phy: phy@0 { + phy: ethernet-phy@0 { compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22"; reg = <0>; clocks = <&cru SCLK_MAC_PHY>; diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi index 
f59f7cc62be690e8750991111ad9fd03d892b5a5..0c60dbc4b46a1939b020f4f5adc6d652f9a75c56 100644 --- a/arch/arm/boot/dts/rk322x.dtsi +++ b/arch/arm/boot/dts/rk322x.dtsi @@ -950,7 +950,7 @@ }; }; - spi-0 { + spi0 { spi0_clk: spi0-clk { rockchip,pins = <0 9 RK_FUNC_2 &pcfg_pull_up>; }; @@ -968,7 +968,7 @@ }; }; - spi-1 { + spi1 { spi1_clk: spi1-clk { rockchip,pins = <0 23 RK_FUNC_2 &pcfg_pull_up>; }; diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 10d2fa183a9ff417f5165a2d8808a991cf96fd22..7ee99e11508cabc0acee1f9188107d97a8adb009 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -706,7 +706,7 @@ }; }; - L2: l2-cache@fffef000 { + L2: cache-controller@fffef000 { compatible = "arm,pl310-cache"; reg = <0xfffef000 0x1000>; interrupts = <0 38 0x04>; diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi index bd1985694bcae14bce4b5228b8bd40cbbea98e36..672e73e35228c406969ed30fa1cfa17e0effe360 100644 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi @@ -606,7 +606,7 @@ reg = <0xffcfb100 0x80>; }; - L2: l2-cache@fffff000 { + L2: cache-controller@fffff000 { compatible = "arm,pl310-cache"; reg = <0xfffff000 0x1000>; interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm/configs/mdm9607-128mb-perf_defconfig b/arch/arm/configs/mdm9607-128mb-perf_defconfig new file mode 120000 index 0000000000000000000000000000000000000000..4e697dfad8b1b183258fc4a239fa3a7f8012f4a9 --- /dev/null +++ b/arch/arm/configs/mdm9607-128mb-perf_defconfig @@ -0,0 +1 @@ +vendor/mdm9607-128mb-perf_defconfig \ No newline at end of file diff --git a/arch/arm/configs/mdm9607-perf_defconfig b/arch/arm/configs/mdm9607-perf_defconfig new file mode 120000 index 0000000000000000000000000000000000000000..55a1d6a21ade646139681715433d2ebe4f23e0f3 --- /dev/null +++ b/arch/arm/configs/mdm9607-perf_defconfig @@ -0,0 +1 @@ +vendor/mdm9607-perf_defconfig \ No newline at end of file diff --git a/arch/arm/configs/mdm9607_defconfig b/arch/arm/configs/mdm9607_defconfig deleted file mode 100644 index f421267ac6bb2b469ab900fa68d490f1b7d4ccc6..0000000000000000000000000000000000000000 --- a/arch/arm/configs/mdm9607_defconfig +++ /dev/null @@ -1,267 +0,0 @@ -CONFIG_SYSVIPC=y -CONFIG_AUDIT=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_CGROUPS=y -CONFIG_CGROUP_SCHED=y -CONFIG_NAMESPACES=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_KALLSYMS_ALL=y -CONFIG_EMBEDDED=y -CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_REGULAR=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_ARCH_QCOM=y -CONFIG_ARCH_MDM9607=y -CONFIG_PREEMPT=y -CONFIG_CMA=y -CONFIG_CPU_FREQ=y -CONFIG_CPU_IDLE=y -CONFIG_VFP=y -CONFIG_NEON=y -CONFIG_PM_AUTOSLEEP=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V2=y -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y -CONFIG_IPV6_PIMSM_V2=y -CONFIG_NETFILTER=y -CONFIG_BRIDGE_NETFILTER=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMEOUT=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_SNMP=y 
-CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SIP=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NF_CT_NETLINK_TIMEOUT=y -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_IP_SET=y -CONFIG_NF_CONNTRACK_IPV4=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -CONFIG_IP_NF_TARGET_NATTYPE_MODULE=y -CONFIG_IP_NF_TARGET_NETMAP=y -CONFIG_IP_NF_TARGET_REDIRECT=y -CONFIG_IP_NF_MANGLE=y -CONFIG_IP_NF_TARGET_ECN=y -CONFIG_IP_NF_TARGET_TTL=y -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y -CONFIG_NF_CONNTRACK_IPV6=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_AH=y -CONFIG_IP6_NF_MATCH_FRAG=y -CONFIG_IP6_NF_MATCH_OPTS=y -CONFIG_IP6_NF_MATCH_HL=y -CONFIG_IP6_NF_MATCH_IPV6HEADER=y -CONFIG_IP6_NF_MATCH_MH=y -CONFIG_IP6_NF_MATCH_RT=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_BRIDGE_EBT_T_FILTER=y -CONFIG_BRIDGE_EBT_T_NAT=y -CONFIG_BRIDGE_EBT_ARP=y -CONFIG_BRIDGE_EBT_IP=y -CONFIG_BRIDGE_EBT_IP6=y -CONFIG_BRIDGE_EBT_ARPREPLY=y -CONFIG_BRIDGE_EBT_DNAT=y -CONFIG_BRIDGE_EBT_SNAT=y -CONFIG_BRIDGE=y -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_PRIO=y -CONFIG_RMNET_DATA=y -CONFIG_RMNET_DATA_FC=y -CONFIG_RMNET_DATA_DEBUG_PKT=y -CONFIG_CAN=y -CONFIG_CAN_VCAN=y -CONFIG_DMA_CMA=y -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_MSM_QPIC_NAND=y -CONFIG_MTD_NAND=y -CONFIG_MTD_UBI=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_NETDEVICES=y -CONFIG_TUN=y -CONFIG_KS8851=y -CONFIG_QCOM_EMAC=m -CONFIG_AT803X_PHY=m -CONFIG_PPP=y -CONFIG_PPP_ASYNC=y -CONFIG_INPUT_EVDEV=y -CONFIG_INPUT_MISC=y -CONFIG_INPUT_UINPUT=y -CONFIG_INPUT_GPIO=m -CONFIG_SERIAL_MSM=y -CONFIG_SERIAL_MSM_CONSOLE=y -CONFIG_SERIAL_MSM_HS=y -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_MSM_LEGACY=y -CONFIG_I2C=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MSM_V2=y -CONFIG_SPI=y -CONFIG_SPI_QUP=y -CONFIG_SPI_SPIDEV=m -CONFIG_SPMI=y -CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y -CONFIG_PPS=y -CONFIG_PPS_CLIENT_GPIO=y -CONFIG_PINCTRL_MDM9607=y -CONFIG_GPIOLIB=y -CONFIG_DEBUG_GPIO=y -CONFIG_GPIO_SYSFS=y -CONFIG_POWER_RESET=y -CONFIG_POWER_RESET_QCOM=y -CONFIG_QCOM_DLOAD_MODE=y -CONFIG_POWER_SUPPLY=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y -CONFIG_REGULATOR_CPR=y -CONFIG_REGULATOR_MEM_ACC=y -CONFIG_REGULATOR_SPM=y -CONFIG_REGULATOR_STUB=y -CONFIG_USB=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_MON=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_MSM=y -CONFIG_USB_EHCI_HCD_PLATFORM=y 
-CONFIG_USB_GADGET=y -CONFIG_USB_GADGET_DEBUG_FILES=y -CONFIG_USB_GADGET_DEBUG_FS=y -CONFIG_USB_GADGET_VBUS_DRAW=500 -CONFIG_USB_CI13XXX_MSM=y -CONFIG_USB_CONFIGFS=y -CONFIG_USB_CONFIGFS_SERIAL=y -CONFIG_USB_CONFIGFS_F_FS=y -CONFIG_USB_CONFIGFS_F_ACC=y -CONFIG_USB_CONFIGFS_UEVENT=y -CONFIG_USB_CONFIGFS_F_DIAG=y -CONFIG_MMC=y -CONFIG_MMC_PERF_PROFILING=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_TEST=m -CONFIG_MMC_PARANOID_SD_INIT=y -CONFIG_MMC_CLKGATE=y -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PLTFM=y -CONFIG_MMC_SDHCI_MSM=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_QPNP=y -CONFIG_DMADEVICES=y -CONFIG_QCOM_SPS_DMA=y -CONFIG_UIO=y -CONFIG_STAGING=y -CONFIG_SPS=y -CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_GSI=y -CONFIG_COMMON_CLK_MSM=y -CONFIG_MSM_CLK_CONTROLLER_V2=y -CONFIG_HWSPINLOCK_QCOM=y -CONFIG_MAILBOX=y -CONFIG_RPMSG_QCOM_SMD=y -CONFIG_QCOM_SMEM=y -CONFIG_MSM_SPM=y -CONFIG_QCOM_SCM=y -CONFIG_QCOM_MEMORY_DUMP_V2=y -CONFIG_QCOM_WATCHDOG_V2=y -CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y -CONFIG_QCOM_SMP2P=y -CONFIG_QCOM_SMSM=y -CONFIG_MSM_BOOT_STATS=y -CONFIG_MSM_BAM_DMUX=y -CONFIG_QCOM_BUS_SCALING=y -CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y -CONFIG_IIO=y -CONFIG_IIO_BUFFER=y -CONFIG_IIO_BUFFER_CB=y -CONFIG_QTI_MPM=y -CONFIG_MSM_TZ_LOG=y -CONFIG_VFAT_FS=y -CONFIG_TMPFS=y -CONFIG_UBIFS_FS=y -CONFIG_UBIFS_FS_ADVANCED_COMPR=y -CONFIG_PRINTK_TIME=y -CONFIG_DYNAMIC_DEBUG=y -CONFIG_DEBUG_INFO=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_PAGEALLOC=y -CONFIG_DEBUG_KMEMLEAK=y -CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y -CONFIG_DEBUG_STACK_USAGE=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_PANIC_ON_RECURSIVE_FAULT=y -CONFIG_PANIC_TIMEOUT=5 -CONFIG_DEBUG_MUTEXES=y -CONFIG_IPC_LOGGING=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_SECURITY=y -CONFIG_SECURITYFS=y -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_DISABLE=y -CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 -CONFIG_SECURITY_SMACK=y diff --git a/arch/arm/configs/mdm9607_defconfig b/arch/arm/configs/mdm9607_defconfig new file mode 120000 index 0000000000000000000000000000000000000000..4e231964ec0c5ad544bc8656bb3b6dc641dacf0f --- /dev/null +++ b/arch/arm/configs/mdm9607_defconfig @@ -0,0 +1 @@ +vendor/mdm9607_defconfig \ No newline at end of file diff --git a/arch/arm/configs/vendor/sdm429-bg-perf_defconfig b/arch/arm/configs/vendor/sdm429-bg-perf_defconfig index d45127d9f1f292222daa9aa1f629d98ac8022744..a39dba8448ef50ac42761a1ef359d4e7605193a3 100644 --- a/arch/arm/configs/vendor/sdm429-bg-perf_defconfig +++ b/arch/arm/configs/vendor/sdm429-bg-perf_defconfig @@ -244,6 +244,7 @@ CONFIG_RFKILL=y CONFIG_NFC_NQ=y CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y CONFIG_REGMAP_WCD_IRQ=y +CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y CONFIG_DMA_CMA=y # CONFIG_QCOM_EBI2 is not set CONFIG_ZRAM=y @@ -256,15 +257,6 @@ CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y CONFIG_UID_SYS_STATS=y CONFIG_QPNP_MISC=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_UFSHCD=y -CONFIG_SCSI_UFSHCD_PLATFORM=y -CONFIG_SCSI_UFS_QCOM=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y @@ -316,6 +308,7 @@ CONFIG_MSM_RMNET_BAM=y # CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TI is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set +CONFIG_PHYLIB=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y CONFIG_PPP_DEFLATE=y @@ -329,8 +322,6 @@ CONFIG_PPPOLAC=y CONFIG_PPPOPNS=y CONFIG_PPP_ASYNC=y CONFIG_PPP_SYNC_TTY=y -CONFIG_USB_RTL8152=y -CONFIG_USB_USBNET=y # CONFIG_WLAN_VENDOR_ADMTEK is not set # CONFIG_WLAN_VENDOR_ATMEL is not set # 
CONFIG_WLAN_VENDOR_BROADCOM is not set @@ -410,46 +401,11 @@ CONFIG_REGULATOR_STUB=y # CONFIG_RC_CORE is not set CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y -CONFIG_MEDIA_CONTROLLER=y -CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_VIDEO_ADV_DEBUG=y CONFIG_VIDEO_FIXED_MINOR_RANGES=y CONFIG_V4L_PLATFORM_DRIVERS=y -CONFIG_MSM_CAMERA=y -CONFIG_MSM_CAMERA_DEBUG=y -CONFIG_MSMB_CAMERA=y -CONFIG_MSMB_CAMERA_DEBUG=y -CONFIG_MSM_CAMERA_SENSOR=y -CONFIG_MSM_CPP=y -CONFIG_MSM_CCI=y -CONFIG_MSM_CSI20_HEADER=y -CONFIG_MSM_CSI22_HEADER=y -CONFIG_MSM_CSI30_HEADER=y -CONFIG_MSM_CSI31_HEADER=y -CONFIG_MSM_CSIPHY=y -CONFIG_MSM_CSID=y -CONFIG_MSM_EEPROM=y -CONFIG_MSM_ISPIF=y -CONFIG_MSM_DUAL_ISP_SYNC=y -CONFIG_IMX134=y -CONFIG_IMX132=y -CONFIG_OV9724=y -CONFIG_OV5648=y -CONFIG_GC0339=y -CONFIG_OV8825=y -CONFIG_OV8865=y -CONFIG_s5k4e1=y -CONFIG_OV12830=y -CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y -CONFIG_MSMB_JPEG=y -CONFIG_MSM_FD=y -CONFIG_MSM_JPEGDMA=y CONFIG_MSM_VIDC_3X_V4L2=y CONFIG_MSM_VIDC_3X_GOVERNORS=y -CONFIG_DVB_MPQ=m -CONFIG_DVB_MPQ_DEMUX=m -CONFIG_DVB_MPQ_SW=y CONFIG_FB=y CONFIG_FB_ARMCLCD=y CONFIG_FB_VIRTUAL=y @@ -464,22 +420,14 @@ CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_DYNAMIC_MINORS=y CONFIG_SND_USB_AUDIO=y -CONFIG_SND_USB_AUDIO_QMI=y CONFIG_SND_SOC=y CONFIG_SND_SOC_TFA98XX=y CONFIG_UHID=y CONFIG_USB=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_MSM=y -CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_STORAGE=y -CONFIG_USB_EHSET_TEST_FIXTURE=y -CONFIG_NOP_USB_XCEIV=y -CONFIG_USB_QCOM_EMU_PHY=y CONFIG_DUAL_ROLE_USB_INTF=y CONFIG_USB_GADGET=y CONFIG_USB_GADGET_DEBUG_FILES=y +CONFIG_USB_GADGET_DEBUG_FS=y CONFIG_USB_GADGET_VBUS_DRAW=500 CONFIG_USB_CI13XXX_MSM=y CONFIG_USB_CONFIGFS=y @@ -503,8 +451,6 @@ CONFIG_MMC_PERF_PROFILING=y # CONFIG_PWRSEQ_SIMPLE is not set CONFIG_MMC_BLOCK_MINORS=32 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y -CONFIG_MMC_TEST=m -CONFIG_MMC_RING_BUFFER=y CONFIG_MMC_PARANOID_SD_INIT=y CONFIG_MMC_CLKGATE=y CONFIG_MMC_SDHCI=y @@ -513,7 +459,10 @@ CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_CQ_HCI=y CONFIG_MMC_CQ_HCI_CRYPTO=y CONFIG_MMC_CQ_HCI_CRYPTO_QTI=y -CONFIG_LEDS_TRIGGERS=y +CONFIG_MMC_QTI_NONCMDQ_ICE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_QPNP=y CONFIG_DMADEVICES=y @@ -579,13 +528,14 @@ CONFIG_QCOM_EARLY_RANDOM=y CONFIG_MSM_TZ_SMMU=y CONFIG_QSEE_IPC_IRQ=y CONFIG_QCOM_GLINK_PKT=y +CONFIG_QTI_RPM_STATS_LOG=y CONFIG_MSM_CDSP_LOADER=y CONFIG_QCOM_SMCINVOKE=y CONFIG_MSM_EVENT_TIMER=y -CONFIG_MSM_AVTIMER=y CONFIG_QCOM_FSA4480_I2C=y CONFIG_MEM_SHARE_QMI_SERVICE=y CONFIG_MSM_PERFORMANCE=y +CONFIG_QMP_DEBUGFS_CLIENT=y CONFIG_QCOM_SMP2P_SLEEPSTATE=y CONFIG_QCOM_CX_IPEAK=y CONFIG_MSM_BAM_DMUX=y @@ -606,7 +556,6 @@ CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y CONFIG_DEVFREQ_GOV_MEMLAT=y CONFIG_QCOM_DEVFREQ_DEVBW=y -CONFIG_EXTCON_USB_GPIO=y CONFIG_IIO=y CONFIG_QCOM_SPMI_ADC5=y CONFIG_QCOM_RRADC=y @@ -619,6 +568,7 @@ CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y CONFIG_QCOM_QFPROM=y CONFIG_SENSORS_SSC=y +CONFIG_MSM_TZ_LOG=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_F2FS_FS=y @@ -634,8 +584,6 @@ CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y -CONFIG_ECRYPT_FS=y -CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_SDCARD_FS=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_NLS_CODEPAGE_437=y diff --git a/arch/arm/configs/vendor/sdm429-bg_defconfig b/arch/arm/configs/vendor/sdm429-bg_defconfig index 
eb44aa751dd1624ce850b6b15d666ff8f6889d40..242168fda63a1dff515211ef5e0b7dbc23d2a261 100644 --- a/arch/arm/configs/vendor/sdm429-bg_defconfig +++ b/arch/arm/configs/vendor/sdm429-bg_defconfig @@ -264,17 +264,6 @@ CONFIG_QSEECOM=y CONFIG_UID_SYS_STATS=y CONFIG_MEMORY_STATE_TIME=y CONFIG_QPNP_MISC=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_UFSHCD=y -CONFIG_SCSI_UFSHCD_PLATFORM=y -CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y @@ -326,6 +315,7 @@ CONFIG_MSM_RMNET_BAM=y # CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TI is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set +CONFIG_PHYLIB=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y CONFIG_PPP_DEFLATE=y @@ -339,8 +329,6 @@ CONFIG_PPPOLAC=y CONFIG_PPPOPNS=y CONFIG_PPP_ASYNC=y CONFIG_PPP_SYNC_TTY=y -CONFIG_USB_RTL8152=y -CONFIG_USB_USBNET=y # CONFIG_WLAN_VENDOR_ADMTEK is not set # CONFIG_WLAN_VENDOR_ATMEL is not set # CONFIG_WLAN_VENDOR_BROADCOM is not set @@ -425,46 +413,11 @@ CONFIG_REGULATOR_STUB=y # CONFIG_RC_CORE is not set CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y -CONFIG_MEDIA_CONTROLLER=y -CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_VIDEO_ADV_DEBUG=y CONFIG_VIDEO_FIXED_MINOR_RANGES=y CONFIG_V4L_PLATFORM_DRIVERS=y -CONFIG_MSM_CAMERA=y -CONFIG_MSM_CAMERA_DEBUG=y -CONFIG_MSMB_CAMERA=y -CONFIG_MSMB_CAMERA_DEBUG=y -CONFIG_MSM_CAMERA_SENSOR=y -CONFIG_MSM_CPP=y -CONFIG_MSM_CCI=y -CONFIG_MSM_CSI20_HEADER=y -CONFIG_MSM_CSI22_HEADER=y -CONFIG_MSM_CSI30_HEADER=y -CONFIG_MSM_CSI31_HEADER=y -CONFIG_MSM_CSIPHY=y -CONFIG_MSM_CSID=y -CONFIG_MSM_EEPROM=y -CONFIG_MSM_ISPIF=y -CONFIG_MSM_DUAL_ISP_SYNC=y -CONFIG_IMX134=y -CONFIG_IMX132=y -CONFIG_OV9724=y -CONFIG_OV5648=y -CONFIG_GC0339=y -CONFIG_OV8825=y -CONFIG_OV8865=y -CONFIG_s5k4e1=y -CONFIG_OV12830=y -CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y -CONFIG_MSMB_JPEG=y -CONFIG_MSM_FD=y -CONFIG_MSM_JPEGDMA=y CONFIG_MSM_VIDC_3X_V4L2=y CONFIG_MSM_VIDC_3X_GOVERNORS=y -CONFIG_DVB_MPQ=m -CONFIG_DVB_MPQ_DEMUX=m -CONFIG_DVB_MPQ_SW=y CONFIG_FB=y CONFIG_FB_VIRTUAL=y CONFIG_FB_MSM=y @@ -479,22 +432,10 @@ CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_DYNAMIC_MINORS=y CONFIG_SND_USB_AUDIO=y -CONFIG_SND_USB_AUDIO_QMI=y CONFIG_SND_SOC=y CONFIG_SND_SOC_TFA98XX=y CONFIG_UHID=y CONFIG_USB=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_MSM=y -CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_STORAGE=y -CONFIG_USB_EHSET_TEST_FIXTURE=y -CONFIG_NOP_USB_XCEIV=y -CONFIG_USB_QCOM_EMU_PHY=y -CONFIG_USB_MSM_SSPHY_QMP=y -CONFIG_MSM_QUSB_PHY=y -CONFIG_MSM_HSUSB_PHY=y CONFIG_DUAL_ROLE_USB_INTF=y CONFIG_USB_GADGET=y CONFIG_USB_GADGET_DEBUG_FILES=y @@ -520,7 +461,6 @@ CONFIG_MMC=y CONFIG_MMC_PERF_PROFILING=y CONFIG_MMC_BLOCK_MINORS=32 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y -CONFIG_MMC_TEST=m CONFIG_MMC_RING_BUFFER=y CONFIG_MMC_PARANOID_SD_INIT=y CONFIG_MMC_CLKGATE=y @@ -530,7 +470,10 @@ CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_CQ_HCI=y CONFIG_MMC_CQ_HCI_CRYPTO=y CONFIG_MMC_CQ_HCI_CRYPTO_QTI=y -CONFIG_LEDS_TRIGGERS=y +CONFIG_MMC_QTI_NONCMDQ_ICE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_EDAC=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_QPNP=y @@ -614,7 +557,6 @@ CONFIG_QTI_RPM_STATS_LOG=y CONFIG_MSM_CDSP_LOADER=y CONFIG_QCOM_SMCINVOKE=y CONFIG_MSM_EVENT_TIMER=y -CONFIG_MSM_AVTIMER=y CONFIG_QCOM_FSA4480_I2C=y CONFIG_MEM_SHARE_QMI_SERVICE=y CONFIG_MSM_PERFORMANCE=y @@ -639,7 +581,6 @@ 
CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y CONFIG_DEVFREQ_GOV_MEMLAT=y CONFIG_QCOM_DEVFREQ_DEVBW=y -CONFIG_EXTCON_USB_GPIO=y CONFIG_IIO=y CONFIG_QCOM_SPMI_ADC5=y CONFIG_QCOM_RRADC=y @@ -667,8 +608,6 @@ CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y -CONFIG_ECRYPT_FS=y -CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_SDCARD_FS=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_NLS_CODEPAGE_437=y @@ -716,7 +655,6 @@ CONFIG_RCU_TORTURE_TEST=m CONFIG_FAULT_INJECTION=y CONFIG_FAIL_PAGE_ALLOC=y CONFIG_FAIL_MMC_REQUEST=y -CONFIG_UFS_FAULT_INJECTION=y CONFIG_FAULT_INJECTION_DEBUG_FS=y CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y CONFIG_IPC_LOGGING=y diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 88286dd483ff901bd1d215fb5450db32add47bd2..1935b580f0e8b20f06ab295a25e108d39dae1807 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -21,11 +21,11 @@ #endif #include -#include #include #include #include #include +#include #define IOMEM(x) (x) @@ -374,9 +374,9 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER() 9999: .if \inc == 1 - \instr\cond\()b\()\t\().w \reg, [\ptr, #\off] + \instr\()b\t\cond\().w \reg, [\ptr, #\off] .elseif \inc == 4 - \instr\cond\()\t\().w \reg, [\ptr, #\off] + \instr\t\cond\().w \reg, [\ptr, #\off] .else .error "Unsupported inc macro argument" .endif @@ -415,9 +415,9 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) .rept \rept 9999: .if \inc == 1 - \instr\cond\()b\()\t \reg, [\ptr], #\inc + \instr\()b\t\cond \reg, [\ptr], #\inc .elseif \inc == 4 - \instr\cond\()\t \reg, [\ptr], #\inc + \instr\t\cond \reg, [\ptr], #\inc .else .error "Unsupported inc macro argument" .endif @@ -447,79 +447,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) .size \name , . - \name .endm - .macro csdb -#ifdef CONFIG_THUMB2_KERNEL - .inst.w 0xf3af8014 -#else - .inst 0xe320f014 -#endif - .endm - - .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req -#ifndef CONFIG_CPU_USE_DOMAINS - adds \tmp, \addr, #\size - 1 - sbcccs \tmp, \tmp, \limit - bcs \bad -#ifdef CONFIG_CPU_SPECTRE - movcs \addr, #0 - csdb -#endif -#endif - .endm - - .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req -#ifdef CONFIG_CPU_SPECTRE - sub \tmp, \limit, #1 - subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr - addhs \tmp, \tmp, #1 @ if (tmp >= 0) { - subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) } - movlo \addr, #0 @ if (tmp < 0) addr = NULL - csdb -#endif - .endm - - .macro uaccess_disable, tmp, isb=1 -#ifdef CONFIG_CPU_SW_DOMAIN_PAN - /* - * Whenever we re-enter userspace, the domains should always be - * set appropriately. - */ - mov \tmp, #DACR_UACCESS_DISABLE - mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register - .if \isb - instr_sync - .endif -#endif - .endm - - .macro uaccess_enable, tmp, isb=1 -#ifdef CONFIG_CPU_SW_DOMAIN_PAN - /* - * Whenever we re-enter userspace, the domains should always be - * set appropriately. 
- */ - mov \tmp, #DACR_UACCESS_ENABLE - mcr p15, 0, \tmp, c3, c0, 0 - .if \isb - instr_sync - .endif -#endif - .endm - - .macro uaccess_save, tmp -#ifdef CONFIG_CPU_SW_DOMAIN_PAN - mrc p15, 0, \tmp, c3, c0, 0 - str \tmp, [sp, #SVC_DACR] -#endif - .endm - - .macro uaccess_restore -#ifdef CONFIG_CPU_SW_DOMAIN_PAN - ldr r0, [sp, #SVC_DACR] - mcr p15, 0, r0, c3, c0, 0 -#endif - .endm - .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .macro ret\c, reg #if __LINUX_ARM_ARCH__ < 6 diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index ffebe7b7a5b743682c071fc14a7dd30f75ebfac9..91ca80035fc4283561fbaf436e5c78b8ebd864b4 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -163,8 +163,13 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) preempt_enable(); #endif - if (!ret) - *oval = oldval; + /* + * Store unconditionally. If ret != 0 the extra store is the least + * of the worries but GCC cannot figure out that __futex_atomic_op() + * is either setting ret to -EFAULT or storing the old value in + * oldval which results in a uninitialized warning at the call site. + */ + *oval = oldval; return ret; } diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h new file mode 100644 index 0000000000000000000000000000000000000000..907571fd05c650c9f4a347b6df35aac40eaa5934 --- /dev/null +++ b/arch/arm/include/asm/uaccess-asm.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_UACCESS_ASM_H__ +#define __ASM_UACCESS_ASM_H__ + +#include +#include +#include +#include + + .macro csdb +#ifdef CONFIG_THUMB2_KERNEL + .inst.w 0xf3af8014 +#else + .inst 0xe320f014 +#endif + .endm + + .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req +#ifndef CONFIG_CPU_USE_DOMAINS + adds \tmp, \addr, #\size - 1 + sbcscc \tmp, \tmp, \limit + bcs \bad +#ifdef CONFIG_CPU_SPECTRE + movcs \addr, #0 + csdb +#endif +#endif + .endm + + .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req +#ifdef CONFIG_CPU_SPECTRE + sub \tmp, \limit, #1 + subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr + addhs \tmp, \tmp, #1 @ if (tmp >= 0) { + subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) } + movlo \addr, #0 @ if (tmp < 0) addr = NULL + csdb +#endif + .endm + + .macro uaccess_disable, tmp, isb=1 +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* + * Whenever we re-enter userspace, the domains should always be + * set appropriately. + */ + mov \tmp, #DACR_UACCESS_DISABLE + mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register + .if \isb + instr_sync + .endif +#endif + .endm + + .macro uaccess_enable, tmp, isb=1 +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* + * Whenever we re-enter userspace, the domains should always be + * set appropriately. + */ + mov \tmp, #DACR_UACCESS_ENABLE + mcr p15, 0, \tmp, c3, c0, 0 + .if \isb + instr_sync + .endif +#endif + .endm + +#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS) +#define DACR(x...) x +#else +#define DACR(x...) +#endif + + /* + * Save the address limit on entry to a privileged exception. + * + * If we are using the DACR for kernel access by the user accessors + * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain + * back to client mode, whether or not \disable is set. + * + * If we are using SW PAN, set the DACR user domain to no access + * if \disable is set. 
+ */ + .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable + ldr \tmp1, [\tsk, #TI_ADDR_LIMIT] + mov \tmp2, #TASK_SIZE + str \tmp2, [\tsk, #TI_ADDR_LIMIT] + DACR( mrc p15, 0, \tmp0, c3, c0, 0) + DACR( str \tmp0, [sp, #SVC_DACR]) + str \tmp1, [sp, #SVC_ADDR_LIMIT] + .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN) + /* kernel=client, user=no access */ + mov \tmp2, #DACR_UACCESS_DISABLE + mcr p15, 0, \tmp2, c3, c0, 0 + instr_sync + .elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS) + /* kernel=client */ + bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL) + orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) + mcr p15, 0, \tmp2, c3, c0, 0 + instr_sync + .endif + .endm + + /* Restore the user access state previously saved by uaccess_entry */ + .macro uaccess_exit, tsk, tmp0, tmp1 + ldr \tmp1, [sp, #SVC_ADDR_LIMIT] + DACR( ldr \tmp0, [sp, #SVC_DACR]) + str \tmp1, [\tsk, #TI_ADDR_LIMIT] + DACR( mcr p15, 0, \tmp0, c3, c0, 0) + .endm + +#undef DACR + +#endif /* __ASM_UACCESS_ASM_H__ */ diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h index ef5dfedacd8d642bdfe27147e37bdec0a6f055bb..628c336e8e3b20c5918f58d9d8f5139b10723953 100644 --- a/arch/arm/include/asm/vfpmacros.h +++ b/arch/arm/include/asm/vfpmacros.h @@ -29,13 +29,13 @@ ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] tst \tmp, #HWCAP_VFPD32 - ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} + ldclne p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} addeq \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field cmp \tmp, #2 @ 32 x 64bit registers? - ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} + ldcleq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} addne \base, \base, #32*4 @ step over unused register space #endif #endif @@ -53,13 +53,13 @@ ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] tst \tmp, #HWCAP_VFPD32 - stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} + stclne p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} addeq \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field cmp \tmp, #2 @ 32 x 64bit registers? 
- stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} + stcleq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} addne \base, \base, #32*4 @ step over unused register space #endif #endif diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index fbc707626b3e9e6794400e41ba1b08916cdf8d4a..f3de76f7ad439a4dea237fb558738b2fa4498676 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -30,6 +30,7 @@ #include #include #include +#include #include "entry-header.S" #include @@ -186,15 +187,7 @@ ENDPROC(__und_invalid) stmia r7, {r2 - r6} get_thread_info tsk - ldr r0, [tsk, #TI_ADDR_LIMIT] - mov r1, #TASK_SIZE - str r1, [tsk, #TI_ADDR_LIMIT] - str r0, [sp, #SVC_ADDR_LIMIT] - - uaccess_save r0 - .if \uaccess - uaccess_disable r0 - .endif + uaccess_entry tsk, r0, r1, r2, \uaccess .if \trace #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 62db1c9746cbc83607c9eaeb45b52a294b94e36e..7b595f2d4a28286f4926425524325185b601df85 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -6,6 +6,7 @@ #include #include #include +#include #include @ Bad Abort numbers @@ -217,9 +218,7 @@ blne trace_hardirqs_off #endif .endif - ldr r1, [sp, #SVC_ADDR_LIMIT] - uaccess_restore - str r1, [tsk, #TI_ADDR_LIMIT] + uaccess_exit tsk, r0, r1 #ifndef CONFIG_THUMB2_KERNEL @ ARM mode SVC restore @@ -263,9 +262,7 @@ @ on the stack remains correct). @ .macro svc_exit_via_fiq - ldr r1, [sp, #SVC_ADDR_LIMIT] - uaccess_restore - str r1, [tsk, #TI_ADDR_LIMIT] + uaccess_exit tsk, r0, r1 #ifndef CONFIG_THUMB2_KERNEL @ ARM mode restore mov r0, sp diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 58e3771e4c5bb8974c55e553bcb95a87c1173c23..368b4b404985c77bd23e8fd7b437da9f332ca083 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -228,8 +228,8 @@ static struct undef_hook arm_break_hook = { }; static struct undef_hook thumb_break_hook = { - .instr_mask = 0xffff, - .instr_val = 0xde01, + .instr_mask = 0xffffffff, + .instr_val = 0x0000de01, .cpsr_mask = PSR_T_BIT, .cpsr_val = PSR_T_BIT, .fn = break_trap, diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h index 93cddab73072cc716b07c0353ec79bdbaec3757a..95bd359912889a5d31ceaefeaefb7597deaf5c14 100644 --- a/arch/arm/lib/bitops.h +++ b/arch/arm/lib/bitops.h @@ -7,7 +7,7 @@ ENTRY( \name ) UNWIND( .fnstart ) ands ip, r1, #3 - strneb r1, [ip] @ assert word-aligned + strbne r1, [ip] @ assert word-aligned mov r2, #1 and r3, r0, #31 @ Get bit offset mov r0, r0, lsr #5 @@ -32,7 +32,7 @@ ENDPROC(\name ) ENTRY( \name ) UNWIND( .fnstart ) ands ip, r1, #3 - strneb r1, [ip] @ assert word-aligned + strbne r1, [ip] @ assert word-aligned mov r2, #1 and r3, r0, #31 @ Get bit offset mov r0, r0, lsr #5 @@ -62,7 +62,7 @@ ENDPROC(\name ) ENTRY( \name ) UNWIND( .fnstart ) ands ip, r1, #3 - strneb r1, [ip] @ assert word-aligned + strbne r1, [ip] @ assert word-aligned and r2, r0, #31 mov r0, r0, lsr #5 mov r3, #1 @@ -89,7 +89,7 @@ ENDPROC(\name ) ENTRY( \name ) UNWIND( .fnstart ) ands ip, r1, #3 - strneb r1, [ip] @ assert word-aligned + strbne r1, [ip] @ assert word-aligned and r3, r0, #31 mov r0, r0, lsr #5 save_and_disable_irqs ip diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c index 868781fd460c788950ac59e6ef197bede4ab6660..14c630c899c5dc00d6be122f03aa3c4ad8927dec 100644 --- a/arch/arm/mach-imx/pm-imx5.c +++ b/arch/arm/mach-imx/pm-imx5.c @@ -301,14 +301,14 @@ static int __init imx_suspend_alloc_ocram( if 
(!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; - goto put_node; + goto put_device; } ocram_base = gen_pool_alloc(ocram_pool, size); if (!ocram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } phys = gen_pool_virt_to_phys(ocram_pool, ocram_base); @@ -318,6 +318,8 @@ static int __init imx_suspend_alloc_ocram( if (virt_out) *virt_out = virt; +put_device: + put_device(&pdev->dev); put_node: of_node_put(node); diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c index 6078bcc9f594a798ede60366c87784fd22a7237f..c7dcb0b2073015d09ba482913285d3f830d092be 100644 --- a/arch/arm/mach-imx/pm-imx6.c +++ b/arch/arm/mach-imx/pm-imx6.c @@ -483,14 +483,14 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; - goto put_node; + goto put_device; } ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE); if (!ocram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base); @@ -513,7 +513,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat); if (ret) { pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret); - goto put_node; + goto put_device; } ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat); @@ -560,7 +560,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) &imx6_suspend, MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info)); - goto put_node; + goto put_device; pl310_cache_map_failed: iounmap(pm_info->gpc_base.vbase); @@ -570,6 +570,8 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) iounmap(pm_info->src_base.vbase); src_map_failed: iounmap(pm_info->mmdc_base.vbase); +put_device: + put_device(&pdev->dev); put_node: of_node_put(node); diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig index cefe44f6889bd2a35baeecfc2303ab1aac6e1d42..ba124f8704facdd381f62e9d739e09afdeb42a3d 100644 --- a/arch/arm/mach-integrator/Kconfig +++ b/arch/arm/mach-integrator/Kconfig @@ -3,6 +3,8 @@ menuconfig ARCH_INTEGRATOR depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V6 select ARM_AMBA select COMMON_CLK_VERSATILE + select CMA + select DMA_CMA select HAVE_TCM select ICST select MFD_SYSCON @@ -34,14 +36,13 @@ config INTEGRATOR_IMPD1 select ARM_VIC select GPIO_PL061 select GPIOLIB + select REGULATOR + select REGULATOR_FIXED_VOLTAGE help The IM-PD1 is an add-on logic module for the Integrator which allows ARM(R) Ltd PrimeCells to be developed and evaluated. The IM-PD1 can be found on the Integrator/PP2 platform. - To compile this driver as a module, choose M here: the - module will be called impd1. 
- config INTEGRATOR_CM7TDMI bool "Integrator/CM7TDMI core module" depends on ARCH_INTEGRATOR_AP diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c index 02e712d2ea300b2040efefbfab43cc8cd9cab2e0..bbc2926bd12bf49ba52dcb532d282bf7f0dcf70a 100644 --- a/arch/arm/mach-tegra/tegra.c +++ b/arch/arm/mach-tegra/tegra.c @@ -108,8 +108,8 @@ static const char * const tegra_dt_board_compat[] = { }; DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)") - .l2c_aux_val = 0x3c400001, - .l2c_aux_mask = 0xc20fc3fe, + .l2c_aux_val = 0x3c400000, + .l2c_aux_mask = 0xc20fc3ff, .smp = smp_ops(tegra_smp_ops), .map_io = tegra_map_common_io, .init_early = tegra_init_early, diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index fc89fa4dc9790091a1bcd0ca80ae1a1a9091146f..c0b56e79c686e5e8d3ab176937067b4d5c930220 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1992,7 +1992,10 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, for_each_sg(sg, s, nents, i) { s->dma_address = iova + current_offset; - s->dma_length = total_length - current_offset; + if (i == 0) + s->dma_length = total_length; + else + s->dma_length = 0; current_offset += s->length; } diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 53d59cdbd83be7d06e54c61019051e12ef71779f..8f8774bd4d5722688457730b8b2db580eee7d54b 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -5,6 +5,7 @@ * VMA_VM_FLAGS * VM_EXEC */ +#include #include #include @@ -30,7 +31,7 @@ * act_mm - get current->active_mm */ .macro act_mm, rd - bic \rd, sp, #8128 + bic \rd, sp, #(THREAD_SIZE - 1) & ~63 bic \rd, \rd, #63 ldr \rd, [\rd, #TI_TASK] .if (TSK_ACTIVE_MM > IMM12_MASK) diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index 3c3057944960802a27d54a387c3bbadcf647bbf5..3ee6c4bae08f67954bdd0dccffc8a69fdad0b244 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -245,6 +245,11 @@ }; }; +&hwrng { + clocks = <&clkc CLKID_RNG0>; + clock-names = "core"; +}; + &i2c_A { clocks = <&clkc CLKID_I2C>; }; diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile index 6dcaa99ed2c5f140b03a01593c7a88bb2793b3d4..a4d97110c95bfbf180f38f6fbce783c8a454f7ec 100644 --- a/arch/arm64/boot/dts/qcom/Makefile +++ b/arch/arm64/boot/dts/qcom/Makefile @@ -7,8 +7,10 @@ ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) sa8155p-adp-star-overlay.dtbo \ sa8155p-adp-star-lpass-overlay.dtbo \ sa8155-v2-adp-air-overlay.dtbo \ - sa8155p-v2-adp-air-overlay.dtbo \ - sa8155p-v2-adp-air-lpass-overlay.dtbo + sa8155p-v2-adp-air-overlay.dtbo \ + sa8155p-v2-adp-air-lpass-overlay.dtbo \ + sa8155-v2-adp-air-lxc-overlay.dtbo \ + sa8155p-v2-adp-air-lxc-overlay.dtbo sm8150-cdp-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb sm8150p.dtb sm8150p-v2.dtb sa8155-adp-star-overlay.dtbo-base := sa8155.dtb sa8155-v2.dtb @@ -17,6 +19,8 @@ sa8155p-adp-star-lpass-overlay.dtbo := sa8155p.dtb sa8155p-v2.dtb sa8155-v2-adp-air-overlay.dtbo-base := sa8155.dtb sa8155-v2.dtb sa8155p-v2-adp-air-overlay.dtbo-base := sa8155p.dtb sa8155p-v2.dtb sa8155p-v2-adp-air-lpass-overlay.dtbo-base := sa8155p.dtb sa8155p-v2.dtb +sa8155-v2-adp-air-lxc-overlay.dtbo-base := sa8155.dtb sa8155-v2.dtb +sa8155p-v2-adp-air-lxc-overlay.dtbo-base := sa8155p.dtb sa8155p-v2.dtb else dtb-$(CONFIG_ARCH_SM8150) += sm8150-cdp.dtb \ sa8155-adp-star.dtb \ @@ -36,11 +40,13 @@ ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) 
dtbo-$(CONFIG_ARCH_SDMSHRIKE) += \ sdmshrike-cdp-overlay.dtbo \ sa8195p-adp-star-overlay.dtbo \ - sa8195p-v2-adp-air-overlay.dtbo + sa8195p-v2-adp-air-overlay.dtbo \ + sa8195p-v2-adp-air-lxc-overlay.dtbo sdmshrike-cdp-overlay.dtbo-base := sdmshrike.dtb sdmshrike-v2.dtb sa8195p-adp-star-overlay.dtbo-base := sa8195p.dtb sa8195p-v2-adp-air-overlay.dtbo-base := sa8195p.dtb +sa8195p-v2-adp-air-lxc-overlay.dtbo-base := sa8195p.dtb else dtb-$(CONFIG_ARCH_SDMSHRIKE) += sdmshrike-cdp.dtb \ sa8195p-adp-star.dtb \ @@ -56,7 +62,8 @@ ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) sa6155p-adp-air-overlay.dtbo \ sa6155p-v2-adp-star-overlay.dtbo \ sa6155p-v2-adp-air-overlay.dtbo \ - sa6155p-v2-adp-air-lpass-overlay.dtbo + sa6155p-v2-adp-air-lpass-overlay.dtbo \ + sa6155p-v2-adp-air-lxc-overlay.dtbo sa6155-adp-star-overlay.dtbo-base := sa6155.dtb sa6155p-adp-star-overlay.dtbo-base := sa6155p.dtb @@ -66,6 +73,7 @@ sa6155-adp-air-overlay.dtbo-base := sa6155.dtb sa6155p-adp-air-overlay.dtbo-base := sa6155p.dtb sa6155p-v2-adp-air-overlay.dtbo-base := sa6155p.dtb sa6155p-v2-adp-air-lpass-overlay.dtbo-base := sa6155p.dtb +sa6155p-v2-adp-air-lxc-overlay.dtbo-base := sa6155p.dtb else dtb-$(CONFIG_ARCH_SM6150) += sa6155-adp-star.dtb \ sa6155p-adp-star.dtb \ @@ -225,7 +233,8 @@ dtb-$(CONFIG_ARCH_SDMSHRIKE) += sdmshrike-rumi.dtb \ sdmshrike-mtp.dtb \ sdmshrike-cdp.dtb \ sdmshrike-v2-mtp.dtb \ - sa8195p-adp-star.dtb + sa8195p-adp-star.dtb \ + sa8195p-v2-adp-air-capture.dtb endif ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) @@ -247,7 +256,12 @@ ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) sa6155p-v2-adp-air-overlay.dtbo \ qcs610-iot-overlay.dtbo \ qcs610-ipc-overlay.dtbo \ - qcs410-iot-overlay.dtbo + qcs410-iot-overlay.dtbo \ + prairie-iot-idp-overlay.dtbo \ + prairie-iot-external-codec-idp-overlay.dtbo \ + prairie-iot-usbc-idp-overlay.dtbo \ + prairie-iot-cmd-mone-display-idp-overlay.dtbo \ + prairie-iot-usbc-minidp-idp-overlay.dtbo sm6150-rumi-overlay.dtbo-base := sm6150.dtb sm6150-qrd-overlay.dtbo-base := sm6150.dtb @@ -267,6 +281,11 @@ sa6155p-v2-adp-air-overlay.dtbo-base := sa6155p.dtb qcs610-iot-overlay.dtbo-base := qcs610.dtb qcs610-ipc-overlay.dtbo-base := qcs610.dtb qcs410-iot-overlay.dtbo-base := qcs410.dtb +prairie-iot-idp-overlay.dtbo-base := qcs610.dtb +prairie-iot-external-codec-idp-overlay.dtbo-base := qcs610.dtb +prairie-iot-usbc-idp-overlay.dtbo-base := qcs610.dtb +prairie-iot-cmd-mode-display-idp-overlay.dtbo-base := qcs610.dtb +prairie-iot-usbc-minidp-idp-overlay.dtbo-base := qcs610.dtb else dtb-$(CONFIG_ARCH_SM6150) += sm6150-rumi.dtb \ sm6150-qrd.dtb \ @@ -285,7 +304,12 @@ dtb-$(CONFIG_ARCH_SM6150) += sm6150-rumi.dtb \ sa6155p-v2-adp-air.dtb \ qcs610-iot.dtb \ qcs610-ipc.dtb \ - qcs410-iot.dtb + qcs410-iot.dtb \ + prairie-iot-idp.dtb \ + prairie-iot-external-codec-idp.dtb \ + prairie-iot-usbc-idp.dtb \ + prairie-iot-cmd-mode-display-idp.dtb \ + prairie-iot-usbc-minidp-idp.dtb endif diff --git a/arch/arm64/boot/dts/qcom/prairie-iot-cmd-mode-display-idp-overlay.dts b/arch/arm64/boot/dts/qcom/prairie-iot-cmd-mode-display-idp-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..451b46b4bc9c18ce63248e5e4aa532a111e1879e --- /dev/null +++ b/arch/arm64/boot/dts/qcom/prairie-iot-cmd-mode-display-idp-overlay.dts @@ -0,0 +1,62 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include +#include + +#include "prairie-iot-idp.dtsi" +#include "sm6150-audio-overlay.dtsi" + +/ { + model = "Command mode display IDP overlay"; + compatible = "qcom,qcs610-idp", "qcom,qcs610", "qcom,idp"; + qcom,msm-id = <401 0x0>; + qcom,board-id = <34 3>; +}; + +&qupv3_se1_i2c { + synaptics_dsx@20 { + compatible = "synaptics,dsx-i2c"; + reg = <0x20>; + interrupt-parent = <&tlmm>; + interrupts = <89 0x2008>; + vdd-supply = <&pm6150_l10>; + avdd-supply = <&pm6150l_l7>; + pinctrl-names = "pmx_ts_active","pmx_ts_suspend", + "pmx_ts_release"; + pinctrl-0 = <&ts_int_active &ts_reset_active>; + pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>; + pinctrl-2 = <&ts_release>; + synaptics,pwr-reg-name = "avdd"; + synaptics,bus-reg-name = "vdd"; + synaptics,ub-i2c-addr = <0x20>; + synaptics,max-y-for-2d = <2159>; + synaptics,irq-gpio = <&tlmm 89 0x2008>; + synaptics,reset-gpio = <&tlmm 88 0x0>; + synaptics,irq-on-state = <0>; + synaptics,power-delay-ms = <200>; + synaptics,reset-delay-ms = <200>; + synaptics,reset-on-state = <0>; + synaptics,reset-active-ms = <20>; + }; + + himax_ts@48 { + status = "disabled"; + }; +}; + +&dsi_td4328_truly_cmd_display { + qcom,dsi-display-active; +}; diff --git a/arch/arm64/boot/dts/qcom/prairie-iot-cmd-mode-display-idp.dts b/arch/arm64/boot/dts/qcom/prairie-iot-cmd-mode-display-idp.dts new file mode 100644 index 0000000000000000000000000000000000000000..24f4ca89323cc994c5cc2906a95fa4f75a5d7f1d --- /dev/null +++ b/arch/arm64/boot/dts/qcom/prairie-iot-cmd-mode-display-idp.dts @@ -0,0 +1,23 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs610.dtsi" +#include "prairie-iot-idp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. QCS610 Command mode display IDP"; + compatible = "qcom,qcs610-idp", "qcom,qcs610", "qcom,idp"; + qcom,msm-id = <401 0x0>; + qcom,board-id = <34 3>; +}; diff --git a/arch/arm64/boot/dts/qcom/prairie-iot-external-codec-idp-overlay.dts b/arch/arm64/boot/dts/qcom/prairie-iot-external-codec-idp-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..81ef59c3fe4dcbef138b7c841d1f722029346de2 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/prairie-iot-external-codec-idp-overlay.dts @@ -0,0 +1,32 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include +#include + +#include "prairie-iot-idp.dtsi" +#include "sm6150-ext-codec-audio-overlay.dtsi" +#include "sm6150-external-codec.dtsi" + +/ { + model = "External Audio Codec IDP overlay"; + compatible = "qcom,qcs610-idp", "qcom,qcs610", "qcom,idp"; + qcom,msm-id = <401 0x0>; + qcom,board-id = <34 1>; +}; + +&dsi_hx83112a_truly_vid_display { + qcom,dsi-display-active; +}; diff --git a/arch/arm64/boot/dts/qcom/prairie-iot-external-codec-idp.dts b/arch/arm64/boot/dts/qcom/prairie-iot-external-codec-idp.dts new file mode 100644 index 0000000000000000000000000000000000000000..b958ecea7061d7302b4d9d613c544d28ce80101e --- /dev/null +++ b/arch/arm64/boot/dts/qcom/prairie-iot-external-codec-idp.dts @@ -0,0 +1,24 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs610.dtsi" +#include "prairie-iot-idp.dtsi" +#include "prairie-iot-external-codec-idp.dtsi" +#include "sm6150-ext-codec-audio-overlay.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. QCS610 External Audio Codec IDP"; + compatible = "qcom,qcs610-idp", "qcom,qcs610", "qcom,idp"; + qcom,board-id = <34 1>; +}; diff --git a/arch/arm64/boot/dts/qcom/prairie-iot-external-codec-idp.dtsi b/arch/arm64/boot/dts/qcom/prairie-iot-external-codec-idp.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..1a7cc75f7cf0adc136eef2432690d755ac010f35 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/prairie-iot-external-codec-idp.dtsi @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include "sm6150-wcd.dtsi" + +&sm6150_snd { + qcom,model = "sm6150-tavil-snd-card"; + qcom,tavil_codec = <1>; + qcom,ext-disp-audio-rx = <1>; + qcom,audio-routing = + "AIF4 VI", "MCLK", + "RX_BIAS", "MCLK", + "MADINPUT", "MCLK", + "hifi amp", "LINEOUT1", + "hifi amp", "LINEOUT2", + "AMIC2", "MIC BIAS2", + "MIC BIAS2", "Headset Mic", + "AMIC3", "MIC BIAS2", + "MIC BIAS2", "ANCRight Headset Mic", + "AMIC4", "MIC BIAS2", + "MIC BIAS2", "ANCLeft Headset Mic", + "AMIC5", "MIC BIAS3", + "MIC BIAS3", "Handset Mic", + "DMIC0", "MIC BIAS1", + "MIC BIAS1", "Digital Mic0", + "DMIC1", "MIC BIAS1", + "MIC BIAS1", "Digital Mic1", + "DMIC2", "MIC BIAS3", + "MIC BIAS3", "Digital Mic2", + "DMIC3", "MIC BIAS3", + "MIC BIAS3", "Digital Mic3", + "DMIC4", "MIC BIAS4", + "MIC BIAS4", "Digital Mic4", + "DMIC5", "MIC BIAS4", + "MIC BIAS4", "Digital Mic5", + "SpkrLeft IN", "SPK1 OUT", + "SpkrRight IN", "SPK2 OUT"; + asoc-cpu = <&dai_dp>, <&dai_mi2s0>, <&dai_mi2s1>, + <&dai_mi2s2>, <&dai_mi2s3>, <&dai_mi2s4>, + <&dai_pri_auxpcm>, <&dai_sec_auxpcm>, + <&dai_tert_auxpcm>, <&dai_quat_auxpcm>, + <&dai_quin_auxpcm>, + <&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>, + <&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>, + <&sb_4_rx>, <&sb_4_tx>, <&sb_5_rx>, <&sb_5_tx>, + <&sb_6_rx>, <&sb_7_rx>, <&sb_7_tx>, + <&sb_8_rx>, <&sb_8_tx>, + <&proxy_rx>, <&proxy_tx>, + <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>, + <&afe_proxy_tx>, <&incall_record_rx>, + <&incall_record_tx>, <&incall_music_rx>, + <&incall_music_2_rx>, + <&usb_audio_rx>, <&usb_audio_tx>, + <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>, + <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>, + <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>, + <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>, + <&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>; + asoc-cpu-names = "msm-dai-q6-dp.24608", + "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1", + "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3", + "msm-dai-q6-mi2s.4", + "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2", + "msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4", + "msm-dai-q6-auxpcm.5", + "msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385", + "msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387", + "msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389", + "msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391", + "msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393", + "msm-dai-q6-dev.16394", "msm-dai-q6-dev.16395", + "msm-dai-q6-dev.16396", + "msm-dai-q6-dev.16398", "msm-dai-q6-dev.16399", + "msm-dai-q6-dev.16400", "msm-dai-q6-dev.16401", + "msm-dai-q6-dev.8194", "msm-dai-q6-dev.8195", + "msm-dai-q6-dev.224", "msm-dai-q6-dev.225", + "msm-dai-q6-dev.241", "msm-dai-q6-dev.240", + "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772", + "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770", + "msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673", + "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865", + "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881", + "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897", + "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913", + "msm-dai-q6-tdm.36928", "msm-dai-q6-tdm.36929"; + asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>; + asoc-codec-names = "msm-stub-codec.1", "msm-ext-disp-audio-codec-rx"; + qcom,hph-en0-gpio = <&tavil_hph_en0>; + qcom,hph-en1-gpio = <&tavil_hph_en1>; + qcom,wsa-max-devs = <2>; + qcom,wsa-devs = <&wsa881x_11>, <&wsa881x_12>, + <&wsa881x_13>, <&wsa881x_14>; + qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight", + "SpkrLeft", "SpkrRight"; + qcom,msm_audio_ssr_devs = <&audio_apr>, <&wcd934x_cdc>, + <&q6core>, <&lpi_tlmm>; +}; + +&slim_aud { + status = "okay"; + tavil_codec { + 
swr3: swr_master { + compatible = "qcom,swr-mstr"; + #address-cells = <2>; + #size-cells = <0>; + qcom,swr-num-ports = <8>; + qcom,swr-port-mapping = <1 SPKR_L 0x1>, + <2 SPKR_L_COMP 0xF>, <3 SPKR_L_BOOST 0x3>, + <4 SPKR_R 0x1>, <5 SPKR_R_COMP 0xF>, + <6 SPKR_R_BOOST 0x3>, <7 SPKR_L_VI 0x3>, + <8 SPKR_R_VI 0x3>; + qcom,swr_master_id = <1>; + wsa881x_11: wsa881x@11 { + compatible = "qcom,wsa881x"; + reg = <0x00 0x20170211>; + qcom,spkr-sd-n-node = <&wsa_spk_wcd_sd1>; + }; + + wsa881x_12: wsa881x@12 { + compatible = "qcom,wsa881x"; + reg = <0x00 0x20170212>; + qcom,spkr-sd-n-node = <&wsa_spk_wcd_sd2>; + }; + + wsa881x_13: wsa881x@13 { + compatible = "qcom,wsa881x"; + reg = <0x00 0x21170213>; + qcom,spkr-sd-n-node = <&wsa_spk_wcd_sd1>; + }; + + wsa881x_14: wsa881x@14 { + compatible = "qcom,wsa881x"; + reg = <0x00 0x21170214>; + qcom,spkr-sd-n-node = <&wsa_spk_wcd_sd2>; + }; + }; + }; +}; + +&dai_slim { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/qcom/prairie-iot-idp-overlay.dts b/arch/arm64/boot/dts/qcom/prairie-iot-idp-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..1a20dc4cc483b600a005b6499322f081455468eb --- /dev/null +++ b/arch/arm64/boot/dts/qcom/prairie-iot-idp-overlay.dts @@ -0,0 +1,31 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include +#include + +#include "prairie-iot-idp.dtsi" +#include "sm6150-audio-overlay.dtsi" + +/ { + model = "Internal Audio Codec IDP overlay"; + compatible = "qcom,qcs610-idp", "qcom,qcs610", "qcom,idp"; + qcom,msm-id = <401 0x0>; + qcom,board-id = <34 0>; +}; + +&dsi_hx83112a_truly_vid_display { + qcom,dsi-display-active; +}; diff --git a/arch/arm64/boot/dts/qcom/prairie-iot-idp.dts b/arch/arm64/boot/dts/qcom/prairie-iot-idp.dts new file mode 100644 index 0000000000000000000000000000000000000000..c17d7af7a4dc37344e9ef9b9d44ced41adeb4331 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/prairie-iot-idp.dts @@ -0,0 +1,23 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs610.dtsi" +#include "prairie-iot-idp.dtsi" +#include "sm6150-audio-overlay.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. 
QCS610 PM6150 IDP"; + compatible = "qcom,qcs610-idp", "qcom,qcs610", "qcom,idp"; + qcom,board-id = <34 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/prairie-iot-idp.dtsi b/arch/arm64/boot/dts/qcom/prairie-iot-idp.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..41c3de4ef9bce53392bcd9d0a827384a3591e83d --- /dev/null +++ b/arch/arm64/boot/dts/qcom/prairie-iot-idp.dtsi @@ -0,0 +1,417 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sm6150-thermal-overlay.dtsi" +#include "sm6150-camera-sensor-idp.dtsi" +#include +#include +#include +#include "sm6150-sde-display.dtsi" +#include + +&qupv3_se3_i2c { + #address-cells = <1>; + #size-cells = <0>; + + status = "ok"; + #include "smb1390.dtsi" + #include "smb1355.dtsi" +}; + +&pm6150l_gpios { + key_vol_up { + key_vol_up_default: key_vol_up_default { + pins = "gpio2"; + function = "normal"; + input-enable; + bias-pull-up; + power-source = <0>; + }; + }; +}; + +&soc { + gpio_keys { + compatible = "gpio-keys"; + label = "gpio-keys"; + + pinctrl-names = "default"; + pinctrl-0 = <&key_vol_up_default>; + + vol_up { + label = "volume_up"; + gpios = <&pm6150l_gpios 2 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + linux,code = ; + linux,can-disable; + debounce-interval = <15>; + gpio-key,wakeup; + }; + }; + + mtp_batterydata: qcom,battery-data { + qcom,batt-id-range-pct = <15>; + #include "qg-batterydata-alium-3600mah.dtsi" + #include "qg-batterydata-mlp356477-2800mah.dtsi" + }; + emac_hw: qcom,emac@20000 { + compatible = "qcom,emac-dwc-eqos"; + qcom,arm-smmu; + reg = <0x20000 0x10000>, + <0x36000 0x100>; + reg-names = "emac-base", "rgmii-base"; + dma-bit-mask = <32>; + emac-core-version = <7>; + interrupts-extended = <&pdc 0 660 4>, <&pdc 0 661 4>, + <&tlmm 121 2>, <&pdc 0 651 4>, + <&pdc 0 652 4>, <&pdc 0 653 4>, + <&pdc 0 654 4>, <&pdc 0 655 4>, + <&pdc 0 656 4>, <&pdc 0 657 4>, + <&pdc 0 658 4>, <&pdc 0 659 4>, + <&pdc 0 668 4>, <&pdc 0 669 4>; + interrupt-names = "sbd-intr", "lpi-intr", + "phy-intr", "tx-ch0-intr", + "tx-ch1-intr", "tx-ch2-intr", + "tx-ch3-intr", "tx-ch4-intr", + "rx-ch0-intr", "rx-ch1-intr", + "rx-ch2-intr", "rx-ch3-intr", + "ptp_pps_irq_0","ptp_pps_irq_1"; + qcom,msm-bus,name = "emac"; + qcom,msm-bus,num-cases = <4>; + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + <98 512 0 0>, <1 781 0 0>, /* No vote */ + <98 512 1250 0>, <1 781 0 40000>, /* 10Mbps vote */ + <98 512 12500 0>, <1 781 0 40000>, /* 100Mbps vote */ + <98 512 125000 0>, <1 781 0 40000>; /* 1000Mbps vote */ + qcom,bus-vector-names = "0", "10", "100", "1000"; + clocks = <&clock_gcc GCC_EMAC_AXI_CLK>, + <&clock_gcc GCC_EMAC_PTP_CLK>, + <&clock_gcc GCC_EMAC_RGMII_CLK>, + <&clock_gcc GCC_EMAC_SLV_AHB_CLK>; + clock-names = "eth_axi_clk", "eth_ptp_clk", + "eth_rgmii_clk", "eth_slave_ahb_clk"; + qcom,phy-reset = <&tlmm 104 GPIO_ACTIVE_HIGH>; + qcom,phy-intr-redirect = <&tlmm 121 GPIO_ACTIVE_LOW>; + gdsc_emac-supply = <&emac_gdsc>; + pinctrl-names = "dev-emac-mdc", "dev-emac-mdio", + "dev-emac-rgmii_txd0_state", "dev-emac-rgmii_txd1_state", + 
"dev-emac-rgmii_txd2_state", "dev-emac-rgmii_txd3_state", + "dev-emac-rgmii_txc_state", "dev-emac-rgmii_tx_ctl_state", + "dev-emac-rgmii_rxd0_state", "dev-emac-rgmii_rxd1_state", + "dev-emac-rgmii_rxd2_state", "dev-emac-rgmii_rxd3_state", + "dev-emac-rgmii_rxc_state", "dev-emac-rgmii_rx_ctl_state", + "dev-emac-phy_intr", "dev-emac-phy_reset_state", + "dev-emac_pin_pps_0"; + + pinctrl-0 = <&emac_mdc>; + pinctrl-1 = <&emac_mdio>; + pinctrl-2 = <&emac_rgmii_txd0>; + pinctrl-3 = <&emac_rgmii_txd1>; + pinctrl-4 = <&emac_rgmii_txd2>; + pinctrl-5 = <&emac_rgmii_txd3>; + pinctrl-6 = <&emac_rgmii_txc>; + pinctrl-7 = <&emac_rgmii_tx_ctl>; + pinctrl-8 = <&emac_rgmii_rxd0>; + pinctrl-9 = <&emac_rgmii_rxd1>; + pinctrl-10 = <&emac_rgmii_rxd2>; + pinctrl-11 = <&emac_rgmii_rxd3>; + pinctrl-12 = <&emac_rgmii_rxc>; + pinctrl-13 = <&emac_rgmii_rx_ctl>; + pinctrl-14 = <&emac_phy_intr>; + pinctrl-15 = <&emac_phy_reset_state>; + pinctrl-16 = <&emac_pin_pps_0>; + + io-macro-info { + io-macro-bypass-mode = <0>; + io-interface = "rgmii"; + }; + emac_emb_smmu: emac_emb_smmu { + compatible = "qcom,emac-smmu-embedded"; + iommus = <&apps_smmu 0x1C0 0x0>; + qcom,iova-mapping = <0x80000000 0x40000000>; + }; + }; +}; + +&qupv3_se0_2uart { + status = "ok"; +}; + +&qupv3_se7_4uart { + status = "ok"; +}; + +&pm6150l_wled { + qcom,string-cfg= <3>; + qcom,leds-per-string = <7>; + status = "ok"; +}; + +&pm6150l_lcdb { + status = "ok"; +}; + +&ufsphy_mem { + compatible = "qcom,ufs-phy-qmp-v3-660"; + + vdda-phy-supply = <&pm6150_l4>; /* 0.9v */ + vdda-phy-always-on; + vdda-pll-supply = <&pm6150_l11>; + vdda-phy-max-microamp = <30000>; + vdda-pll-max-microamp = <12000>; + + status = "ok"; +}; + +&ufshc_mem { + vdd-hba-supply = <&ufs_phy_gdsc>; + vdd-hba-fixed-regulator; + vcc-supply = <&pm6150l_l11>; + vcc-voltage-level = <2950000 2960000>; + vccq2-supply = <&pm6150_l12>; + vcc-max-microamp = <600000>; + vccq2-max-microamp = <600000>; + + qcom,vddp-ref-clk-supply = <&pm6150l_l3>; + qcom,vddp-ref-clk-max-microamp = <100>; + qcom,vddp-ref-clk-min-uV = <1232000>; + qcom,vddp-ref-clk-max-uV = <1260000>; + + status = "ok"; +}; + +&qupv3_se1_i2c { + status = "okay"; + himax_ts@48 { + compatible = "himax,hxcommon"; + reg = <0x48>; + interrupt-parent = <&tlmm>; + interrupts = <89 0x2008>; + vdd-supply = <&pm6150_l10>; + avdd-supply = <&pm6150l_l7>; + pinctrl-names = "pmx_ts_active","pmx_ts_suspend", + "pmx_ts_release"; + pinctrl-0 = <&ts_int_active &ts_reset_active>; + pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>; + pinctrl-2 = <&ts_release>; + himax,panel-coords = <0 1080 0 2160>; + himax,display-coords = <0 1080 0 2160>; + himax,irq-gpio = <&tlmm 89 0x00>; + himax,rst-gpio = <&tlmm 88 0x00>; + report_type = <1>; + }; +}; + +&qupv3_se5_i2c { + status = "ok"; + qcom,clk-freq-out = <1000000>; + + #address-cells = <1>; + #size-cells = <0>; + + nq@28 { + compatible = "qcom,nq-nci"; + reg = <0x28>; + qcom,nq-irq = <&tlmm 86 0x00>; + qcom,nq-ven = <&tlmm 84 0x00>; + qcom,nq-firm = <&tlmm 85 0x00>; + qcom,nq-clkreq = <&tlmm 50 0x00>; + interrupt-parent = <&tlmm>; + interrupts = <86 0>; + interrupt-names = "nfc_irq"; + pinctrl-names = "nfc_active", "nfc_suspend"; + pinctrl-0 = <&nfc_int_active &nfc_enable_active + &nfc_clk_req_active>; + pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend + &nfc_clk_req_suspend>; + }; +}; + +&sdhc_1 { + vdd-supply = <&pm6150l_l11>; + qcom,vdd-voltage-level = <2950000 2950000>; + qcom,vdd-current-level = <0 570000>; + + vdd-io-supply = <&pm6150_l12>; + qcom,vdd-io-always-on; + qcom,vdd-io-lpm-sup; + 
qcom,vdd-io-voltage-level = <1800000 1800000>; + qcom,vdd-io-current-level = <0 325000>; + + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>; + pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>; + + status = "ok"; +}; + +&sdhc_2 { + vdd-supply = <&pm6150l_l9>; + qcom,vdd-voltage-level = <2950000 2950000>; + qcom,vdd-current-level = <0 800000>; + + vdd-io-supply = <&pm6150l_l6>; + qcom,vdd-io-voltage-level = <1800000 3100000>; + qcom,vdd-io-current-level = <0 22000>; + + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>; + pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>; + + cd-gpios = <&tlmm 99 1>; + + status = "ok"; +}; + +&pm6150_qg { + qcom,battery-data = <&mtp_batterydata>; + qcom,qg-iterm-ma = <100>; + qcom,hold-soc-while-full; + qcom,linearize-soc; + qcom,cl-feedback-on; +}; + +&pm6150_charger { + io-channels = <&pm6150_vadc ADC_USB_IN_V_16>, + <&pm6150_vadc ADC_USB_IN_I>, + <&pm6150_vadc ADC_CHG_TEMP>, + <&pm6150_vadc ADC_DIE_TEMP>, + <&pm6150_vadc ADC_AMUX_THM4_PU2>, + <&pm6150_vadc ADC_SBUx>, + <&pm6150_vadc ADC_VPH_PWR>; + io-channel-names = "usb_in_voltage", + "usb_in_current", + "chg_temp", + "die_temp", + "conn_temp", + "sbux_res", + "vph_voltage"; + qcom,battery-data = <&mtp_batterydata>; + qcom,auto-recharge-soc = <98>; + qcom,step-charging-enable; + qcom,sw-jeita-enable; + qcom,fcc-stepping-enable; + qcom,suspend-input-on-debug-batt; + qcom,sec-charger-config = <3>; + qcom,thermal-mitigation = <4200000 3500000 3000000 + 2500000 2000000 1500000 1000000 500000>; + dpdm-supply = <&qusb_phy0>; + qcom,charger-temp-max = <800>; + qcom,smb-temp-max = <800>; +}; + +&smb1390 { + /delete-property/ interrupts; + interrupts = <0x0 0xc2 0x0 IRQ_TYPE_LEVEL_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&smb_stat_default>; + status = "ok"; +}; + +&smb1390_charger { + /delete-property/ compatible; + compatible = "qcom,smb1390-charger-psy"; + io-channels = <&pm6150_vadc ADC_AMUX_THM3>; + io-channel-names = "cp_die_temp"; + status = "ok"; +}; + +&smb1355 { + /delete-property/ interrupts; + interrupts = <0x0 0xc2 0x0 IRQ_TYPE_LEVEL_LOW>; + status = "ok"; +}; + +&smb1355_charger { + status = "ok"; +}; + +&dsi_sim_cmd { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,platform-reset-gpio = <&tlmm 91 0>; +}; + +&dsi_sim_vid { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,platform-reset-gpio = <&tlmm 91 0>; +}; + +&dsi_hx83112a_truly_video { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,platform-te-gpio = <&tlmm 90 0>; + qcom,platform-reset-gpio = <&tlmm 91 0>; +}; + +&dsi_td4328_truly_video { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,platform-te-gpio = <&tlmm 90 0>; + qcom,platform-reset-gpio = <&tlmm 91 0>; +}; + +&dsi_td4328_truly_cmd { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,platform-te-gpio = <&tlmm 90 0>; + qcom,platform-reset-gpio = <&tlmm 91 0>; +}; + 
+&dsi_rm69298_truly_amoled_video { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <255>; + qcom,platform-te-gpio = <&tlmm 90 0>; + qcom,platform-reset-gpio = <&tlmm 91 0>; +}; + +&dsi_rm69298_truly_amoled_cmd { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <255>; + qcom,platform-te-gpio = <&tlmm 90 0>; + qcom,platform-reset-gpio = <&tlmm 91 0>; +}; + +&dsi_sharp_split_link_wuxga_video { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,platform-te-gpio = <&tlmm 90 0>; + qcom,platform-reset-gpio = <&tlmm 91 0>; +}; + +&soc { + qcom,rmnet-ipa { + status="okay"; + }; +}; + +&ipa_hw { + status="okay"; +}; + +&pil_modem_mem { + reg = <0 0x8fd00000 0 0x3100000>; +}; diff --git a/arch/arm64/boot/dts/qcom/prairie-iot-usbc-idp-overlay.dts b/arch/arm64/boot/dts/qcom/prairie-iot-usbc-idp-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..931f6bd23ce15945a9c5e2e17b65ad8eba64d738 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/prairie-iot-usbc-idp-overlay.dts @@ -0,0 +1,31 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include +#include + +#include "prairie-iot-idp.dtsi" +#include "sm6150-usbc-idp.dtsi" + +/ { + model = "USBC Audio IDP overlay"; + compatible = "qcom,qcs610-idp", "qcom,qcs610", "qcom,idp"; + qcom,msm-id = <401 0x0>; + qcom,board-id = <34 2>; +}; + +&dsi_hx83112a_truly_vid_display { + qcom,dsi-display-active; +}; diff --git a/arch/arm64/boot/dts/qcom/prairie-iot-usbc-idp.dts b/arch/arm64/boot/dts/qcom/prairie-iot-usbc-idp.dts new file mode 100644 index 0000000000000000000000000000000000000000..961171b0fd9ef0b59a38851481a12dd6174e733c --- /dev/null +++ b/arch/arm64/boot/dts/qcom/prairie-iot-usbc-idp.dts @@ -0,0 +1,23 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs610.dtsi" +#include "prairie-iot-idp.dtsi" +#include "prairie-iot-usbc-idp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. 
QCS610 PM6150 USBC Audio IDP"; + compatible = "qcom,qcs610-idp", "qcom,qcs610", "qcom,idp"; + qcom,board-id = <34 2>; +}; diff --git a/arch/arm64/boot/dts/qcom/prairie-iot-usbc-idp.dtsi b/arch/arm64/boot/dts/qcom/prairie-iot-usbc-idp.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..741dad2c56f57fa2e3ff5281ba47983ed9ea2774 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/prairie-iot-usbc-idp.dtsi @@ -0,0 +1,19 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sm6150-audio-overlay.dtsi" + +&sm6150_snd { + qcom,msm-mbhc-usbc-audio-supported = <1>; + qcom,msm-mbhc-hphl-swh = <0>; + qcom,msm-mbhc-gnd-swh = <0>; +}; diff --git a/arch/arm64/boot/dts/qcom/prairie-iot-usbc-minidp-idp-overlay.dts b/arch/arm64/boot/dts/qcom/prairie-iot-usbc-minidp-idp-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..4a3c2ea8d8c90d39c129cf17714cce64536c7419 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/prairie-iot-usbc-minidp-idp-overlay.dts @@ -0,0 +1,32 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include +#include + +#include "prairie-iot-idp.dtsi" +#include "sm6150-usbc-idp.dtsi" + +/ { + model = "USBC mini DP Primary Panel IDP overlay"; + compatible = "qcom,qcs610-idp", "qcom,qcs610", "qcom,idp"; + qcom,msm-id = <401 0x0>; + qcom,board-id = <34 4>; +}; + +&sde_dp { + qcom,dp-hpd-gpio = <&tlmm 103 0>; + qcom,dp-low-power-hw-hpd; +}; diff --git a/arch/arm64/boot/dts/qcom/prairie-iot-usbc-minidp-idp.dts b/arch/arm64/boot/dts/qcom/prairie-iot-usbc-minidp-idp.dts new file mode 100644 index 0000000000000000000000000000000000000000..a9775189b0cb9667301735e41e547983ea1c2c61 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/prairie-iot-usbc-minidp-idp.dts @@ -0,0 +1,29 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs610.dtsi" +#include "prairie-iot-idp.dtsi" +#include "prairie-iot-usbc-idp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. 
QCS610 USBC mini DP Panel IDP"; + compatible = "qcom,qcs610-idp", "qcom,qcs610", "qcom,idp"; + qcom,msm-id = <401 0x0>; + qcom,board-id = <34 4>; +}; + +&sde_dp { + qcom,dp-hpd-gpio = <&tlmm 103 0>; + qcom,dp-low-power-hw-hpd; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-thermal.dtsi b/arch/arm64/boot/dts/qcom/qcs405-thermal.dtsi index 89ea35cba095dc0a432bd544e87371f43706c8e4..d0b3449adda074430471bfb7efc2653434abc9ca 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-thermal.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-thermal.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -55,6 +55,7 @@ polling-delay = <0>; thermal-governor = "user_space"; thermal-sensors = <&tsens0 0>; + wake-capable-sensor; trips { active-config0 { temperature = <125000>; @@ -69,6 +70,7 @@ polling-delay = <0>; thermal-governor = "user_space"; thermal-sensors = <&tsens0 1>; + wake-capable-sensor; trips { active-config0 { temperature = <125000>; @@ -83,6 +85,7 @@ polling-delay = <0>; thermal-governor = "user_space"; thermal-sensors = <&tsens0 2>; + wake-capable-sensor; trips { active-config0 { temperature = <125000>; @@ -97,6 +100,7 @@ polling-delay = <0>; thermal-governor = "user_space"; thermal-sensors = <&tsens0 3>; + wake-capable-sensor; trips { active-config0 { temperature = <125000>; @@ -111,6 +115,7 @@ polling-delay = <0>; thermal-sensors = <&tsens0 4>; thermal-governor = "user_space"; + wake-capable-sensor; trips { active-config0 { temperature = <125000>; @@ -125,6 +130,7 @@ polling-delay = <0>; thermal-sensors = <&tsens0 5>; thermal-governor = "user_space"; + wake-capable-sensor; trips { active-config0 { temperature = <125000>; @@ -139,6 +145,7 @@ polling-delay = <0>; thermal-sensors = <&tsens0 6>; thermal-governor = "user_space"; + wake-capable-sensor; trips { active-config0 { temperature = <125000>; @@ -153,6 +160,7 @@ polling-delay = <0>; thermal-sensors = <&tsens0 7>; thermal-governor = "user_space"; + wake-capable-sensor; trips { active-config0 { temperature = <125000>; @@ -167,6 +175,7 @@ polling-delay = <0>; thermal-sensors = <&tsens0 8>; thermal-governor = "user_space"; + wake-capable-sensor; trips { active-config0 { temperature = <125000>; @@ -181,6 +190,7 @@ polling-delay = <0>; thermal-sensors = <&tsens0 9>; thermal-governor = "user_space"; + wake-capable-sensor; trips { active-config0 { temperature = <125000>; @@ -194,6 +204,7 @@ polling-delay-passive = <50>; polling-delay = <100>; thermal-governor = "step_wise"; + wake-capable-sensor; trips { cpu_trip:cpu-trip { temperature = <105000>; @@ -234,6 +245,7 @@ polling-delay = <0>; thermal-sensors = <&tsens0 9>; thermal-governor = "step_wise"; + wake-capable-sensor; trips { gpu_step_trip: gpu-step-trip { temperature = <105000>; @@ -256,6 +268,7 @@ polling-delay = <0>; thermal-sensors = <&tsens0 5>; thermal-governor = "step_wise"; + wake-capable-sensor; trips { cpuss_0_step_trip: cpuss-0-step-trip { temperature = <118000>; @@ -278,6 +291,7 @@ polling-delay = <0>; thermal-sensors = <&tsens0 6>; thermal-governor = "step_wise"; + wake-capable-sensor; trips { cpuss_1_step_trip: cpuss-1-step-trip { temperature = <118000>; @@ -300,6 +314,7 @@ polling-delay = <0>; thermal-sensors = <&tsens0 7>; thermal-governor = "step_wise"; + wake-capable-sensor; trips { cpuss_2_step_trip: cpuss-2-step-trip { temperature = 
<118000>; @@ -322,6 +337,7 @@ polling-delay = <0>; thermal-sensors = <&tsens0 8>; thermal-governor = "step_wise"; + wake-capable-sensor; trips { cpuss_3_step_trip: cpuss-3-step-trip { temperature = <118000>; @@ -344,6 +360,7 @@ polling-delay = <0>; thermal-governor = "low_limits_cap"; thermal-sensors = <&tsens0 0>; + wake-capable-sensor; tracks-low; trips { aoss_lowc: aoss-lowc { @@ -365,6 +382,7 @@ polling-delay = <0>; thermal-governor = "low_limits_floor"; thermal-sensors = <&tsens0 0>; + wake-capable-sensor; tracks-low; trips { aoss_lowf: aoss-lowf { @@ -399,6 +417,15 @@ polling-delay = <0>; thermal-governor = "user_space"; thermal-sensors = <&pms405_adc_tm_iio ADC_XO_THERM_PU2>; + wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; pa-therm1-usr { @@ -406,6 +433,15 @@ polling-delay = <0>; thermal-governor = "user_space"; thermal-sensors = <&pms405_adc_tm_iio ADC_AMUX_THM1_PU2>; + wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; pa-therm3-usr { @@ -413,5 +449,14 @@ polling-delay = <0>; thermal-governor = "user_space"; thermal-sensors = <&pms405_adc_tm_iio ADC_AMUX_THM3_PU2>; + wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405.dtsi b/arch/arm64/boot/dts/qcom/qcs405.dtsi index 52aff5591aea8ef0b4e5cf1c261785c823cb991f..fea11fe9960e76008798709c6930dc22c85919f4 100644 --- a/arch/arm64/boot/dts/qcom/qcs405.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405.dtsi @@ -1695,6 +1695,7 @@ compatible = "qcom,emac-smmu-embedded"; iommus = <&apps_smmu 0x1400 0x0>; qcom,iova-mapping = <0x80000000 0x40000000>; + qcom,smmu-geometry; }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs610-iot.dtsi b/arch/arm64/boot/dts/qcom/qcs610-iot.dtsi index 3397b23ca9ba2ec65cfaf94c2f334068278c4bbb..45170c7acc497324533fef0bc30c765e84a33218 100644 --- a/arch/arm64/boot/dts/qcom/qcs610-iot.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs610-iot.dtsi @@ -183,6 +183,7 @@ compatible = "qcom,emac-smmu-embedded"; iommus = <&apps_smmu 0x1C0 0x0>; qcom,iova-mapping = <0x80000000 0x40000000>; + qcom,smmu-geometry; }; }; diff --git a/arch/arm64/boot/dts/qcom/quin-vm-common.dtsi b/arch/arm64/boot/dts/qcom/quin-vm-common.dtsi index 5c5abef195ae8f54c71b926d230a6bd2168d1ee3..f67d789fa6e2d694535de2c7268eae74703fd1f8 100644 --- a/arch/arm64/boot/dts/qcom/quin-vm-common.dtsi +++ b/arch/arm64/boot/dts/qcom/quin-vm-common.dtsi @@ -29,7 +29,7 @@ ranges; /* global autoconfigured region for contiguous allocations */ - linux,cma { + linux_cma: linux,cma { compatible = "shared-dma-pool"; alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>; reusable; diff --git a/arch/arm64/boot/dts/qcom/sa2145p-ccard-nand-dc.dts b/arch/arm64/boot/dts/qcom/sa2145p-ccard-nand-dc.dts index 4f84d3aa500d2bad1b614d8298303d373325357b..6f9c3f0c6ffd66d294dba4bece0910389d9330e6 100644 --- a/arch/arm64/boot/dts/qcom/sa2145p-ccard-nand-dc.dts +++ b/arch/arm64/boot/dts/qcom/sa2145p-ccard-nand-dc.dts @@ -56,7 +56,7 @@ }; &tlmm { - wakeup_gpio_default: wakeup_gpio_default { + sdx_ext_ipc_gpio_default: sdx_ext_ipc_gpio_default { mux { pins = "gpio77"; function = "gpio"; @@ -111,3 +111,19 @@ "dev-emac-rgmii_lvl_shift_state"; pinctrl-16 = <&rgmii_level_shifter>; }; + +&gpu_bw_tbl { + status = "disabled"; +}; + +&gpubw { + status = "disabled"; +}; + +&msm_gpu { + status = "disabled"; +}; + +&kgsl_msm_iommu { + 
status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/qcom/sa2145p-ccard.dtsi b/arch/arm64/boot/dts/qcom/sa2145p-ccard.dtsi index 882275bc970d0597a008a464553002c24c84b233..d9e63143b783313e57df55f81f1ef69c033db3c8 100644 --- a/arch/arm64/boot/dts/qcom/sa2145p-ccard.dtsi +++ b/arch/arm64/boot/dts/qcom/sa2145p-ccard.dtsi @@ -25,14 +25,14 @@ }; }; - wakeup_gpio_default: wakeup_gpio_default { + sdx_ext_ipc_gpio_default: sdx_ext_ipc_gpio_default { mux { - pins = "gpio79"; + pins = "gpio14", "gpio79"; function = "gpio"; }; config { - pins = "gpio79"; + pins = "gpio14", "gpio79"; drive-strength = <2>; bias-pull-down; }; @@ -170,6 +170,9 @@ }; qmi-tmd-devices { + + /delete-node/ modem; + adsp { qcom,instance-id = <0x1>; @@ -185,13 +188,16 @@ qcom,wakeup-gpio-in = <&tlmm 14 0x00>; qcom,wakeup-gpio-out = <&tlmm 79 0x00>; pinctrl-names = "default"; - pinctrl-0 = <&wakeup_gpio_default>; + pinctrl-0 = <&sdx_ext_ipc_gpio_default>; }; }; &thermal_zones { aoss-lowf { cooling-maps { + + /delete-node/ modem_vdd_cdev; + adsp_vdd_cdev { trip = <&aoss_lowf>; cooling-device = <&adsp_vdd 0 0>; @@ -200,6 +206,8 @@ }; }; +#include "sdxprairie-thermal-common.dtsi" + &usb3 { qcom,ignore-wakeup-src-in-hostmode; }; diff --git a/arch/arm64/boot/dts/qcom/sa2150p-ccard-emmc-eth-phy.dts b/arch/arm64/boot/dts/qcom/sa2150p-ccard-emmc-eth-phy.dts index f050bda84348668ba2c048af311a50b85cb81b60..f9a6262865c73c45eb1935d3c8b91b7ea5115dac 100644 --- a/arch/arm64/boot/dts/qcom/sa2150p-ccard-emmc-eth-phy.dts +++ b/arch/arm64/boot/dts/qcom/sa2150p-ccard-emmc-eth-phy.dts @@ -20,3 +20,34 @@ "qcom,ccard"; qcom,board-id = <0x030019 0>; }; + +&mtl_rx_setup { + queue2 { + snps,dcb-algorithm; + }; + + queue3 { + snps,dcb-algorithm; + }; +}; + +&mtl_tx_setup { + queue2 { + snps,dcb-algorithm; + }; + + queue3 { + snps,dcb-algorithm; + }; +}; + +ðqos_hw { + qcom,qoe_mode = <1>; + qcom,qoe-queue = <2>; + qcom,qoe-vlan-offset = <0>; + snps,mtl-rx-config = <&mtl_rx_setup>; + snps,mtl-tx-config = <&mtl_tx_setup>; + qcom,cv2x_mode = <2>; + qcom,cv2x-queue = <3>; + qcom,cv2x-vlan-offset = <1>; +}; diff --git a/arch/arm64/boot/dts/qcom/sa2150p-ccard-emmc-eth-sw.dts b/arch/arm64/boot/dts/qcom/sa2150p-ccard-emmc-eth-sw.dts index 804f361ee9a5d678844e5bf4036b98e752e8ca93..050e79ee5898f29aea8cc946a074b058dd24b877 100644 --- a/arch/arm64/boot/dts/qcom/sa2150p-ccard-emmc-eth-sw.dts +++ b/arch/arm64/boot/dts/qcom/sa2150p-ccard-emmc-eth-sw.dts @@ -20,3 +20,36 @@ "qcom,ccard"; qcom,board-id = <0x020019 0>; }; + +&mtl_rx_setup { + queue2 { + snps,dcb-algorithm; + }; + + queue3 { + snps,dcb-algorithm; + }; +}; + +&mtl_tx_setup { + queue2 { + snps,dcb-algorithm; + }; + + queue3 { + snps,dcb-algorithm; + }; +}; + +ðqos_hw { + qcom,qoe_mode = <1>; + qcom,qoe-queue = <2>; + qcom,qoe-vlan-offset = <0>; + snps,mtl-rx-config = <&mtl_rx_setup>; + snps,mtl-tx-config = <&mtl_tx_setup>; + qcom,cv2x_mode = <2>; + qcom,cv2x-queue = <3>; + qcom,cv2x-vlan-offset = <1>; + mac2mac; + mac2mac-rgmii-speed = <1000>; +}; diff --git a/arch/arm64/boot/dts/qcom/sa2150p-ccard-nand-dc.dts b/arch/arm64/boot/dts/qcom/sa2150p-ccard-nand-dc.dts index 3f9b8b2428e7b5a0b7982330ee44113fbe24c3f2..b5dbb948ced877253fea21baa2f545121a000c77 100644 --- a/arch/arm64/boot/dts/qcom/sa2150p-ccard-nand-dc.dts +++ b/arch/arm64/boot/dts/qcom/sa2150p-ccard-nand-dc.dts @@ -130,7 +130,7 @@ }; &tlmm { - wakeup_gpio_default: wakeup_gpio_default { + sdx_ext_ipc_gpio_default: sdx_ext_ipc_gpio_default { mux { pins = "gpio77"; function = "gpio"; @@ -185,3 +185,20 @@ "dev-emac-rgmii_lvl_shift_state"; pinctrl-16 = 
<&rgmii_level_shifter>; }; + +&gpu_bw_tbl { + status = "disabled"; +}; + +&gpubw { + status = "disabled"; +}; + +&msm_gpu { + status = "disabled"; +}; + +&kgsl_msm_iommu { + status = "disabled"; +}; + diff --git a/arch/arm64/boot/dts/qcom/sa2150p-ccard-nand.dts b/arch/arm64/boot/dts/qcom/sa2150p-ccard-nand.dts index 828e3ea73c42de3d4be597d15b1f1f5ffe0a9fa7..eaa5c8d7d97698cce6b4d9b639c85e87ab146318 100644 --- a/arch/arm64/boot/dts/qcom/sa2150p-ccard-nand.dts +++ b/arch/arm64/boot/dts/qcom/sa2150p-ccard-nand.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -99,3 +99,20 @@ &sdhc_1 { status = "disabled"; }; + +&gpu_bw_tbl { + status = "disabled"; +}; + +&gpubw { + status = "disabled"; +}; + +&msm_gpu { + status = "disabled"; +}; + +&kgsl_msm_iommu { + status = "disabled"; +}; + diff --git a/arch/arm64/boot/dts/qcom/sa2150p-ccard.dtsi b/arch/arm64/boot/dts/qcom/sa2150p-ccard.dtsi index 5988b66988b784f50bcdde0fb85e5460ccfbc9cb..d204ea661ddd319c8255a70517936f8b8b2d602d 100644 --- a/arch/arm64/boot/dts/qcom/sa2150p-ccard.dtsi +++ b/arch/arm64/boot/dts/qcom/sa2150p-ccard.dtsi @@ -25,14 +25,14 @@ }; }; - wakeup_gpio_default: wakeup_gpio_default { + sdx_ext_ipc_gpio_default: sdx_ext_ipc_gpio_default { mux { - pins = "gpio79"; + pins = "gpio14", "gpio79"; function = "gpio"; }; config { - pins = "gpio79"; + pins = "gpio14", "gpio79"; drive-strength = <2>; bias-pull-down; }; @@ -171,6 +171,9 @@ }; qmi-tmd-devices { + + /delete-node/ modem; + adsp { qcom,instance-id = <0x1>; @@ -186,13 +189,16 @@ qcom,wakeup-gpio-in = <&tlmm 14 0x00>; qcom,wakeup-gpio-out = <&tlmm 79 0x00>; pinctrl-names = "default"; - pinctrl-0 = <&wakeup_gpio_default>; + pinctrl-0 = <&sdx_ext_ipc_gpio_default>; }; }; &thermal_zones { aoss-lowf { cooling-maps { + + /delete-node/ modem_vdd_cdev; + adsp_vdd_cdev { trip = <&aoss_lowf>; cooling-device = <&adsp_vdd 0 0>; @@ -201,6 +207,8 @@ }; }; +#include "sdxprairie-thermal-common.dtsi" + &usb3 { qcom,ignore-wakeup-src-in-hostmode; }; @@ -213,39 +221,11 @@ extcon = <&usb2_extcon>; }; -&mtl_rx_setup { - queue2 { - snps,dcb-algorithm; - }; - - queue3 { - snps,dcb-algorithm; - }; -}; - -&mtl_tx_setup { - queue2 { - snps,dcb-algorithm; - }; - - queue3 { - snps,dcb-algorithm; - }; -}; - ðqos_hw { status = "okay"; vreg_emac_phy-supply = <&vreg_emac_phy>; vreg_rgmii_io_pads-supply = <&vreg_rgmii_io_pads>; rxc-skew-ps = <0>; - qcom,qoe_mode = <1>; - qcom,qoe-queue = <2>; - qcom,qoe-vlan-offset = <0>; - snps,mtl-rx-config = <&mtl_rx_setup>; - snps,mtl-tx-config = <&mtl_tx_setup>; - qcom,cv2x_mode = <2>; - qcom,cv2x-queue = <3>; - qcom,cv2x-vlan-offset = <1>; pinctrl-names = "dev-emac-mdc", "dev-emac-mdio", "dev-emac-rgmii_txd0_state", "dev-emac-rgmii_txd1_state", diff --git a/arch/arm64/boot/dts/qcom/sa515m-ccard.dtsi b/arch/arm64/boot/dts/qcom/sa515m-ccard.dtsi index 251b24cce5f1ad1b72427adc6fc1144ed3d303d6..54167eda44180411d5a05bf10177f5409c5350f5 100644 --- a/arch/arm64/boot/dts/qcom/sa515m-ccard.dtsi +++ b/arch/arm64/boot/dts/qcom/sa515m-ccard.dtsi @@ -411,8 +411,8 @@ pinctrl-1 = <&cnss_wlan_en_sleep>; chip_cfg@0 { - reg = <0x10000000 0x10000000>, - <0x20000000 0x10000>; + reg = <0xa0000000 0x10000000>, + <0xb0000000 0x10000>; reg-names = "smmu_iova_base", "smmu_iova_ipa"; supported-ids = <0x003e>; diff --git 
a/arch/arm64/boot/dts/qcom/sa515m-v2-ccard-eth-phy-ep.dts b/arch/arm64/boot/dts/qcom/sa515m-v2-ccard-eth-phy-ep.dts index e4f3e8d8bbdc47710314a2c05217c86f78b0fccb..09ed287d9d7cf53a02c9cc2936ad6a43780a7a23 100644 --- a/arch/arm64/boot/dts/qcom/sa515m-v2-ccard-eth-phy-ep.dts +++ b/arch/arm64/boot/dts/qcom/sa515m-v2-ccard-eth-phy-ep.dts @@ -54,3 +54,81 @@ qcom,default-policy-nop; status = "okay"; }; + +&mtl_rx_setup { + queue2 { + snps,dcb-algorithm; + }; + + queue3 { + snps,dcb-algorithm; + }; +}; + +&mtl_tx_setup { + queue2 { + snps,dcb-algorithm; + }; + + queue3 { + snps,dcb-algorithm; + }; +}; + +ðqos_hw { + qcom,qoe_mode = <1>; + qcom,qoe-queue = <2>; + qcom,qoe-vlan-offset = <0>; + snps,mtl-rx-config = <&mtl_rx_setup>; + snps,mtl-tx-config = <&mtl_tx_setup>; + qcom,cv2x_mode = <1>; + qcom,cv2x-queue = <3>; + qcom,cv2x-vlan-offset = <1>; + mac2mac; + mac2mac-rgmii-speed = <1000>; +}; + +&spi_1 { + status = "okay"; + + sja1105: ethernet-switch@0{ + compatible = "qcom,nxp,sja1105p-switch"; + reg = <0>; + spi-max-frequency = <12000000>; + spi-cpha; + switch-speed = <1000>; + pinctrl-names = "default"; + pinctrl-0 = <&sja1105_default>; + qcom,reset-gpio = <&tlmm 91 0x1>; + + port-0 { + null-phy = <0x1>; + phy-ref = < 0 >; + logical-port-num = < 0 >; + }; + + port-1 { + null-phy = <0x1>; + phy-ref = < 0 >; + logical-port-num = < 1 >; + }; + + port-2 { + null-phy = <0x1>; + phy-ref = < 0 >; + logical-port-num = < 2 >; + }; + + port-3 { + null-phy = <0x1>; + phy-ref = < 0 >; + logical-port-num = < 3 >; + }; + + port-4 { + null-phy = <0x1>; + phy-ref = < 0 >; + logical-port-num = < 4 >; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sa6155-adp-air-overlay.dts b/arch/arm64/boot/dts/qcom/sa6155-adp-air-overlay.dts index 72a605e98aaa8bc87f0a49de2aefa1fc9a05d56e..ef3d1cc83f7190901b68ac9d8e2f194d9893998c 100644 --- a/arch/arm64/boot/dts/qcom/sa6155-adp-air-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sa6155-adp-air-overlay.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -16,6 +16,7 @@ #include #include "sa6155-adp-air.dtsi" +#include "sa6155-display-shd.dtsi" / { model = "Qualcomm Technologies, Inc. 
SA6155 ADP-AIR"; diff --git a/arch/arm64/boot/dts/qcom/sa6155-adp-air.dtsi b/arch/arm64/boot/dts/qcom/sa6155-adp-air.dtsi index 3f9013f63bd8ec9a82fee275f842e30ca838c9dd..cb6f73ddc21fce83110c56b1285cde8fa30b1789 100644 --- a/arch/arm64/boot/dts/qcom/sa6155-adp-air.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155-adp-air.dtsi @@ -12,6 +12,7 @@ #include #include #include "sa6155-cnss.dtsi" +#include "sa6155-display.dtsi" &qupv3_2 { status = "ok"; @@ -70,6 +71,9 @@ "csr_hclk"; number-of-rate-detectors = <2>; rate-detector-interfaces = <0 1>; + iommus = <&apps_smmu 0x035C 0x1>; + qcom,smmu-s1-bypass; + qcom,iova-mapping = <0x0 0xFFFFFFFF>; sdr0: qcom,hs0_i2s { compatible = "qcom,hsi2s-interface"; @@ -81,9 +85,6 @@ &hs0_i2s_data1_sleep>; clocks = <&clock_gcc GCC_SDR_PRI_MI2S_CLK>; clock-names = "pri_mi2s_clk"; - iommus = <&apps_smmu 0x035C 0x0>; - qcom,smmu-s1-bypass; - qcom,iova-mapping = <0x0 0xFFFFFFFF>; bit-clock-hz = <12288000>; data-buffer-ms = <10>; bit-depth = <32>; @@ -113,9 +114,6 @@ &hs1_i2s_data1_sleep>; clocks = <&clock_gcc GCC_SDR_SEC_MI2S_CLK>; clock-names = "sec_mi2s_clk"; - iommus = <&apps_smmu 0x035D 0x0>; - qcom,smmu-s1-bypass; - qcom,iova-mapping = <0x0 0xFFFFFFFF>; bit-clock-hz = <12288000>; data-buffer-ms = <10>; bit-depth = <32>; diff --git a/arch/arm64/boot/dts/qcom/sa6155-adp-star-overlay.dts b/arch/arm64/boot/dts/qcom/sa6155-adp-star-overlay.dts index 4eb2f21bd0f6c516083754b867756bb9d8396a87..7ab6f1121e097264c3a03d5c1e3ae998a1e76c02 100644 --- a/arch/arm64/boot/dts/qcom/sa6155-adp-star-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sa6155-adp-star-overlay.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -16,6 +16,7 @@ #include #include "sa6155-adp-star.dtsi" +#include "sa6155-display-shd.dtsi" / { model = "Qualcomm Technologies, Inc. 
SA6155 ADP-STAR"; diff --git a/arch/arm64/boot/dts/qcom/sa6155-adp-star.dtsi b/arch/arm64/boot/dts/qcom/sa6155-adp-star.dtsi index ab3647ea3828b9d721fefaa70821cb733f29be0a..4577d99129747bc7f2c17b59ce49b6407747fdbf 100644 --- a/arch/arm64/boot/dts/qcom/sa6155-adp-star.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155-adp-star.dtsi @@ -12,6 +12,7 @@ #include #include #include "sa6155-cnss.dtsi" +#include "sa6155-display.dtsi" &qupv3_2 { status = "ok"; @@ -63,6 +64,9 @@ "csr_hclk"; number-of-rate-detectors = <2>; rate-detector-interfaces = <0 1>; + iommus = <&apps_smmu 0x035C 0x1>; + qcom,smmu-s1-bypass; + qcom,iova-mapping = <0x0 0xFFFFFFFF>; sdr0: qcom,hs0_i2s { compatible = "qcom,hsi2s-interface"; @@ -74,9 +78,6 @@ &hs0_i2s_data1_sleep>; clocks = <&clock_gcc GCC_SDR_PRI_MI2S_CLK>; clock-names = "pri_mi2s_clk"; - iommus = <&apps_smmu 0x035C 0x0>; - qcom,smmu-s1-bypass; - qcom,iova-mapping = <0x0 0xFFFFFFFF>; bit-clock-hz = <12288000>; data-buffer-ms = <10>; bit-depth = <32>; @@ -106,9 +107,6 @@ &hs1_i2s_data1_sleep>; clocks = <&clock_gcc GCC_SDR_SEC_MI2S_CLK>; clock-names = "sec_mi2s_clk"; - iommus = <&apps_smmu 0x035D 0x0>; - qcom,smmu-s1-bypass; - qcom,iova-mapping = <0x0 0xFFFFFFFF>; bit-clock-hz = <12288000>; data-buffer-ms = <10>; bit-depth = <32>; diff --git a/arch/arm64/boot/dts/qcom/sa6155-display-shd.dtsi b/arch/arm64/boot/dts/qcom/sa6155-display-shd.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..0b803070c63c991c62bd5da0175f5f70d1b31918 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa6155-display-shd.dtsi @@ -0,0 +1,83 @@ +/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + sde_sh_base0: qcom,shared-display-base@0 { + qcom,shared-display-base-intf = <1>; + }; + + sde_sh0: qcom,shared-display@0 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base0>; + qcom,blend-stage-range = <0 6>; + qcom,shared-display-name = "shared-disp-0"; + qcom,display-type = "primary"; + }; + + sde_sh1: qcom,shared-display@1 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base0>; + qcom,blend-stage-range = <6 1>; + qcom,shared-display-name = "shared-disp-1"; + }; + + sde_sh2: qcom,shared-display@2 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base0>; + qcom,blend-stage-range = <7 1>; + qcom,shared-display-name = "shared-disp-2"; + }; + + sde_shp: qcom,sde-shared-plane { + compatible = "qcom,sde-shared-plane"; + qcom,add-planes { + plane@0 { + qcom,plane-name = "plane-4-splash"; + qcom,plane-parent = "plane-4"; + qcom,plane-init-active; + }; + plane@1 { + qcom,plane-name = "plane-3-splash"; + qcom,plane-parent = "plane-3"; + qcom,plane-init-active; + qcom,plane-init-handoff; + }; + }; + }; + + sde_card1: qcom,sde-kms-lease@0 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm"; + }; + + sde_card2: qcom,sde-kms-lease@1 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm2"; + qcom,lease-connectors = "shared-disp-2"; + qcom,lease-planes = "plane-4-splash"; + }; + + sde_card3: qcom,sde-kms-lease@2 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm3"; + qcom,lease-connectors = "shared-disp-1"; + qcom,lease-planes = "plane-3-splash"; + }; +}; + +&mdss_mdp { + qcom,sde-reg-dma-version = <0>; + qcom,sde-crtc-num-pref = <5>; + connectors = <&dsi_dp1 &sde_wb &sde_dp + &sde_sh0 &sde_sh1 &sde_sh2 &sde_shp + &sde_card1 &sde_card2 &sde_card3>; +}; diff --git a/arch/arm64/boot/dts/qcom/sa6155-display.dtsi b/arch/arm64/boot/dts/qcom/sa6155-display.dtsi index e0d3f7a7264a8e85a31299865ab2d688f8cdbd58..b8e3f024dbed4bba2e1918213c122e9a19ac9383 100644 --- a/arch/arm64/boot/dts/qcom/sa6155-display.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155-display.dtsi @@ -48,10 +48,11 @@ vdda-1p2-supply = <&pm6155_1_l11>; vdda-0p9-supply = <&pm6155_1_l5>; - /delete-property/ qcom,dp-aux-switch; - /delete-property/ pinctrl-names; + qcom,dp-aux-switch = <0>; + pinctrl-names = <0>; qcom,mst-enable; qcom,dp-aux-bridge-sim = <&sde_dp_mst_sim>; + qcom,widebus-enable; qcom,core-supply-entries { #address-cells = <1>; @@ -221,77 +222,10 @@ qcom,rmnet-ipa { status="disabled"; }; - - sde_sh_base0: qcom,shared-display-base@0 { - qcom,shared-display-base-intf = <1>; - }; - - sde_sh0: qcom,shared-display@0 { - compatible = "qcom,shared-display"; - qcom,shared-display-base = <&sde_sh_base0>; - qcom,blend-stage-range = <0 6>; - qcom,shared-display-name = "shared-disp-0"; - qcom,display-type = "primary"; - }; - - sde_sh1: qcom,shared-display@1 { - compatible = "qcom,shared-display"; - qcom,shared-display-base = <&sde_sh_base0>; - qcom,blend-stage-range = <6 1>; - qcom,shared-display-name = "shared-disp-1"; - }; - - sde_sh2: qcom,shared-display@2 { - compatible = "qcom,shared-display"; - qcom,shared-display-base = <&sde_sh_base0>; - qcom,blend-stage-range = <7 1>; - qcom,shared-display-name = "shared-disp-2"; - }; - - sde_shp: qcom,sde-shared-plane { - compatible = "qcom,sde-shared-plane"; - qcom,add-planes { - plane@0 { - qcom,plane-name = "plane-4-splash"; - qcom,plane-parent = "plane-4"; - qcom,plane-init-active; - }; - plane@1 { - qcom,plane-name = "plane-3-splash"; - qcom,plane-parent = 
"plane-3"; - qcom,plane-init-active; - qcom,plane-init-handoff; - }; - }; - }; - - sde_card1: qcom,sde-kms-lease@0 { - compatible = "qcom,sde-kms-lease"; - qcom,dev-name = "msm_drm"; - }; - - sde_card2: qcom,sde-kms-lease@1 { - compatible = "qcom,sde-kms-lease"; - qcom,dev-name = "msm_drm2"; - qcom,lease-connectors = "shared-disp-2"; - qcom,lease-planes = "plane-4-splash"; - }; - - sde_card3: qcom,sde-kms-lease@2 { - compatible = "qcom,sde-kms-lease"; - qcom,dev-name = "msm_drm3"; - qcom,lease-connectors = "shared-disp-1"; - qcom,lease-planes = "plane-3-splash"; - }; }; &mdss_mdp { sde-vdd-supply = <&mdss_core_gdsc>; - qcom,sde-reg-dma-version = <0>; - qcom,sde-crtc-num-pref = <5>; - connectors = <&dsi_dp1 &sde_wb &sde_dp - &sde_sh0 &sde_sh1 &sde_sh2 &sde_shp - &sde_card1 &sde_card2 &sde_card3>; qcom,platform-supply-entries { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/qcom/sa6155.dtsi b/arch/arm64/boot/dts/qcom/sa6155.dtsi index 23f13ebd7e6f5849fec2167314de8d2c75c53938..f0118b247601e3e4a44b339217f6e61353190acf 100644 --- a/arch/arm64/boot/dts/qcom/sa6155.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155.dtsi @@ -12,7 +12,6 @@ #include "sm6150.dtsi" #include "sa6155-pmic.dtsi" -#include "sa6155-display.dtsi" / { model = "Qualcomm Technologies, Inc. SA6155"; @@ -569,3 +568,7 @@ }; }; }; + +&kgsl_msm_iommu { + qcom,secure-size = <0x20000000>; /* 512MB */ +}; diff --git a/arch/arm64/boot/dts/qcom/sa6155p-adp-air-overlay.dts b/arch/arm64/boot/dts/qcom/sa6155p-adp-air-overlay.dts index 2d61e9f82b26ec7c5e652bb2ca93ef88a524b48b..7d56612b1e019d168988b90d4485a15dfec683e2 100644 --- a/arch/arm64/boot/dts/qcom/sa6155p-adp-air-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sa6155p-adp-air-overlay.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,6 +17,7 @@ #include #include "sa6155-adp-air.dtsi" +#include "sa6155-display-shd.dtsi" / { model = "Qualcomm Technologies, Inc. SA6155P ADP-AIR"; diff --git a/arch/arm64/boot/dts/qcom/sa6155p-adp-star-overlay.dts b/arch/arm64/boot/dts/qcom/sa6155p-adp-star-overlay.dts index af524dee576b2da6185bbed0cd9ea2b8d7ae2b9a..8fafa2eaaf84c52ed0f3447a6683cafd70a32a6b 100644 --- a/arch/arm64/boot/dts/qcom/sa6155p-adp-star-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sa6155p-adp-star-overlay.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,6 +17,7 @@ #include #include "sa6155-adp-star.dtsi" +#include "sa6155-display-shd.dtsi" / { model = "Qualcomm Technologies, Inc. SA6155P ADP-STAR"; diff --git a/arch/arm64/boot/dts/qcom/sa6155p-v2-adp-air-display-lxc-shd.dtsi b/arch/arm64/boot/dts/qcom/sa6155p-v2-adp-air-display-lxc-shd.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..a1b924f4c5fa8f40e865932c41454e91952789d0 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa6155p-v2-adp-air-display-lxc-shd.dtsi @@ -0,0 +1,81 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + sde_sh_base0: qcom,shared-display-base@0 { + qcom,shared-display-base-intf = <1>; + }; + + sde_sh_base1: qcom,shared-display-base@1 { + qcom,shared-display-base-intf = <0>; + qcom,shared-display-base-mst; + }; + + sde_sh_base2: qcom,shared-display-base@2 { + qcom,shared-display-base-intf = <3>; + qcom,shared-display-base-mst; + }; + + sde_sh0: qcom,shared-display@0 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base0>; + qcom,blend-stage-range = <0 4>; + qcom,shared-display-name = "shared-disp-0"; + qcom,display-type = "primary"; + }; + + sde_sh1: qcom,shared-display@1 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base1>; + qcom,blend-stage-range = <0 4>; + qcom,shared-display-name = "shared-disp-1"; + qcom,display-type = "primary"; + }; + + sde_sh2: qcom,shared-display@2 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base2>; + qcom,blend-stage-range = <0 2>; + qcom,shared-display-name = "shared-disp-2"; + qcom,display-type = "primary"; + }; + + sde_card1: qcom,sde-kms-lease@0 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm"; + qcom,lease-connectors = "shared-disp-0"; + qcom,lease-planes = "plane-0","plane-1","plane-9", + "plane-5"; + }; + + sde_card2: qcom,sde-kms-lease@1 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm"; + qcom,lease-connectors = "shared-disp-1"; + qcom,lease-planes = "plane-2","plane-6","plane-3","plane-7"; + }; + + sde_card3: qcom,sde-kms-lease@2 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm"; + qcom,lease-connectors = "shared-disp-2"; + qcom,lease-planes = "plane-4","plane-8"; + }; +}; + +&mdss_mdp { + qcom,sde-reg-dma-version = <0>; + qcom,sde-crtc-num-pref = <5>; + connectors = <&sde_rscc &dsi_dp1 &sde_wb &sde_dp + &sde_sh0 &sde_sh1 &sde_sh2 + &sde_card1 &sde_card2 &sde_card3>; +}; diff --git a/arch/arm64/boot/dts/qcom/sa6155p-v2-adp-air-lxc-overlay.dts b/arch/arm64/boot/dts/qcom/sa6155p-v2-adp-air-lxc-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..a189085f23b076a4a4acc6968b7d92ad505c8ecc --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa6155p-v2-adp-air-lxc-overlay.dts @@ -0,0 +1,41 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include +#include + +#include "sa6155-adp-air.dtsi" +#include "sa6155-display.dtsi" +#include "sa6155p-v2-adp-air-display-lxc-shd.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. 
SA6155P ADP-AIR V2"; + compatible = "qcom,sa6155p-adp-air", "qcom,sa6155p", "qcom,adp-air"; + qcom,msm-id = <377 0x0>, <380 0>; + qcom,board-id = <0x03020019 0>; +}; + +/* For Voice Call Use-Case*/ +&dai_pri_auxpcm { + qcom,msm-cpudai-auxpcm-sync = <0>, <0>; + qcom,msm-cpudai-auxpcm-pcm-clk-rate = <0>, <0>; +}; + +&soc { + qcom,diag { + compatible = "qcom,diag"; + qcom,usb-enabled; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sa6155p-v2-adp-air-overlay.dts b/arch/arm64/boot/dts/qcom/sa6155p-v2-adp-air-overlay.dts index 939f28f35fe8a936a7bee9e42339055944fe4f65..773d186e07b1b6b9968b4c3826cf1a97f6105c05 100644 --- a/arch/arm64/boot/dts/qcom/sa6155p-v2-adp-air-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sa6155p-v2-adp-air-overlay.dts @@ -17,6 +17,7 @@ #include #include "sa6155-adp-air.dtsi" +#include "sa6155-display-shd.dtsi" / { model = "Qualcomm Technologies, Inc. SA6155P ADP-AIR V2"; diff --git a/arch/arm64/boot/dts/qcom/sa6155p-v2-adp-star-overlay.dts b/arch/arm64/boot/dts/qcom/sa6155p-v2-adp-star-overlay.dts index c2d16fa9c655d5be8049e02d731a1c65eeeac33c..66a8058f4e6a855c0425f4a2c5c1115ebd76e2bc 100644 --- a/arch/arm64/boot/dts/qcom/sa6155p-v2-adp-star-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sa6155p-v2-adp-star-overlay.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,6 +17,7 @@ #include #include "sa6155-adp-star.dtsi" +#include "sa6155-display-shd.dtsi" / { model = "Qualcomm Technologies, Inc. SA6155P ADP-STAR V2"; diff --git a/arch/arm64/boot/dts/qcom/sa6155p-vm.dtsi b/arch/arm64/boot/dts/qcom/sa6155p-vm.dtsi index dba72305096dfb90ea36ac6198958982247c3cd4..a3702664fe94faf65a5673b00f0cc9db354b1653 100644 --- a/arch/arm64/boot/dts/qcom/sa6155p-vm.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155p-vm.dtsi @@ -150,6 +150,9 @@ "csr_hclk"; number-of-rate-detectors = <2>; rate-detector-interfaces = <0 1>; + iommus = <&apps_smmu 0x035C 0x1>; + qcom,smmu-s1-bypass; + qcom,iova-mapping = <0x0 0xFFFFFFFF>; sdr0: qcom,hs0_i2s { compatible = "qcom,hsi2s-interface"; diff --git a/arch/arm64/boot/dts/qcom/sa6155p.dtsi b/arch/arm64/boot/dts/qcom/sa6155p.dtsi index a3f4dfdf57ec1896842a1948d9c87bf632d7fb74..efbac69280c73d9de2e2e8ccc39938ac1283d7d4 100644 --- a/arch/arm64/boot/dts/qcom/sa6155p.dtsi +++ b/arch/arm64/boot/dts/qcom/sa6155p.dtsi @@ -12,7 +12,6 @@ #include "sm6150.dtsi" #include "sa6155-pmic.dtsi" -#include "sa6155-display.dtsi" #include "sa6155p-camera.dtsi" #include "sa6155p-camera-sensor.dtsi" @@ -595,6 +594,10 @@ }; }; +&kgsl_msm_iommu { + qcom,secure-size = <0x20000000>; /* 512MB */ +}; + /* Audio device tree */ #include "sa6155-audio.dtsi" #include "sa6155-pcie.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sa8155-adp-star-display-shd.dtsi b/arch/arm64/boot/dts/qcom/sa8155-adp-star-display-shd.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..86da51997ea21c5e6366e6d66eb45c22b0738318 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa8155-adp-star-display-shd.dtsi @@ -0,0 +1,82 @@ +/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + sde_sh_base0: qcom,shared-display-base@0 { + qcom,shared-display-base-intf = <1>; + }; + + sde_sh0: qcom,shared-display@0 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base0>; + qcom,blend-stage-range = <0 8>; + qcom,shared-display-name = "shared-disp-0"; + qcom,display-type = "primary"; + }; + + sde_sh1: qcom,shared-display@1 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base0>; + qcom,blend-stage-range = <8 1>; + qcom,shared-display-name = "shared-disp-1"; + }; + + sde_sh2: qcom,shared-display@2 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base0>; + qcom,blend-stage-range = <9 1>; + qcom,shared-display-name = "shared-disp-2"; + }; + + sde_shp: qcom,sde-shared-plane { + compatible = "qcom,sde-shared-plane"; + qcom,add-planes { + plane@0 { + qcom,plane-name = "plane-4-splash"; + qcom,plane-parent = "plane-4"; + qcom,plane-init-active; + }; + plane@1 { + qcom,plane-name = "plane-3-splash"; + qcom,plane-parent = "plane-3"; + qcom,plane-init-active; + qcom,plane-init-handoff; + }; + }; + }; + + sde_card1: qcom,sde-kms-lease@0 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm"; + }; + + sde_card2: qcom,sde-kms-lease@1 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm2"; + qcom,lease-connectors = "shared-disp-2"; + qcom,lease-planes = "plane-4-splash"; + }; + + sde_card3: qcom,sde-kms-lease@2 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm3"; + qcom,lease-connectors = "shared-disp-1"; + qcom,lease-planes = "plane-3-splash"; + }; +}; + +&mdss_mdp { + qcom,sde-reg-dma-version = <0>; + connectors = <&dsi_dp1 &dsi_dp2 &sde_dp &sde_wb + &sde_sh0 &sde_sh1 &sde_sh2 &sde_shp + &sde_card1 &sde_card2 &sde_card3>; +}; diff --git a/arch/arm64/boot/dts/qcom/sa8155-adp-star-display.dtsi b/arch/arm64/boot/dts/qcom/sa8155-adp-star-display.dtsi index 11a8b3eaa11433bf6532991b2875d673cf5078cb..6ff776ec9cd7644e3ef44752b8de14d3e71a1485 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-adp-star-display.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-adp-star-display.dtsi @@ -44,6 +44,7 @@ qcom,ext-disp = <&ext_disp>; qcom,dp-hpd-gpio = <&ioexp 8 0>; qcom,mst-fixed-topology-ports = <1 2>; + qcom,widebus-enable; qcom,core-supply-entries { #address-cells = <1>; @@ -304,77 +305,10 @@ compatible = "qcom,msm-ext-disp-audio-codec-rx"; }; }; - - sde_sh_base0: qcom,shared-display-base@0 { - qcom,shared-display-base-intf = <1>; - }; - - - sde_sh0: qcom,shared-display@0 { - compatible = "qcom,shared-display"; - qcom,shared-display-base = <&sde_sh_base0>; - qcom,blend-stage-range = <0 8>; - qcom,shared-display-name = "shared-disp-0"; - qcom,display-type = "primary"; - }; - - sde_sh1: qcom,shared-display@1 { - compatible = "qcom,shared-display"; - qcom,shared-display-base = <&sde_sh_base0>; - qcom,blend-stage-range = <8 1>; - qcom,shared-display-name = "shared-disp-1"; - }; - - sde_sh2: qcom,shared-display@2 { - compatible = "qcom,shared-display"; - qcom,shared-display-base = <&sde_sh_base0>; - qcom,blend-stage-range = <9 1>; - qcom,shared-display-name = "shared-disp-2"; - }; - - sde_shp: qcom,sde-shared-plane { - compatible = "qcom,sde-shared-plane"; - qcom,add-planes { - plane@0 { - qcom,plane-name = "plane-4-splash"; - 
qcom,plane-parent = "plane-4"; - qcom,plane-init-active; - }; - plane@1 { - qcom,plane-name = "plane-3-splash"; - qcom,plane-parent = "plane-3"; - qcom,plane-init-active; - qcom,plane-init-handoff; - }; - }; - }; - - sde_card1: qcom,sde-kms-lease@0 { - compatible = "qcom,sde-kms-lease"; - qcom,dev-name = "msm_drm"; - }; - - sde_card2: qcom,sde-kms-lease@1 { - compatible = "qcom,sde-kms-lease"; - qcom,dev-name = "msm_drm2"; - qcom,lease-connectors = "shared-disp-2"; - qcom,lease-planes = "plane-4-splash"; - }; - - sde_card3: qcom,sde-kms-lease@2 { - compatible = "qcom,sde-kms-lease"; - qcom,dev-name = "msm_drm3"; - qcom,lease-connectors = "shared-disp-1"; - qcom,lease-planes = "plane-3-splash"; - }; }; &mdss_mdp { sde-vdd-supply = <&mdss_core_gdsc>; - qcom,sde-reg-dma-version = <0>; - connectors = <&dsi_dp1 &dsi_dp2 &sde_dp &sde_wb - &sde_sh0 &sde_sh1 &sde_sh2 &sde_shp - &sde_card1 &sde_card2 &sde_card3>; qcom,platform-supply-entries { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/qcom/sa8155-adp-star-overlay.dts b/arch/arm64/boot/dts/qcom/sa8155-adp-star-overlay.dts index 4dd522713b208e94605e1e694da277dd44285f4f..f1f978abe8d9b4a75ccf19257da6ced48881678d 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-adp-star-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sa8155-adp-star-overlay.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -16,6 +16,7 @@ #include #include "sa8155-adp-star.dtsi" +#include "sa8155-adp-star-display-shd.dtsi" / { model = "ADP-STAR"; diff --git a/arch/arm64/boot/dts/qcom/sa8155-capture.dtsi b/arch/arm64/boot/dts/qcom/sa8155-capture.dtsi index ce36343a5d8de93bd0e280e08335b63caa9662c0..79d89c08fa295cfd5b9d3b1f44d3815004d0c975 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-capture.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-capture.dtsi @@ -64,6 +64,209 @@ qcom,dump-size = <0x5000>; }; }; + + CPU1: cpu@100 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + next-level-cache = <&L2_1>; + #cooling-cells = <0x2>; + + L2_1: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <0x2>; + next-level-cache = <&L3_0>; + }; + + L1_I_100: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x8800>; + }; + + L1_D_100: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x9000>; + }; + + L2_TLB_100: l2-tlb { + qcom,dump-size = <0x5000>; + }; + }; + + CPU2: cpu@200 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + next-level-cache = <&L2_2>; + #cooling-cells = <0x2>; + + L2_2: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <0x2>; + next-level-cache = <&L3_0>; + }; + + L1_I_200: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x8800>; + }; + + L1_D_200: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x9000>; + }; + + L2_TLB_200: l2-tlb { + qcom,dump-size = <0x5000>; + }; + }; + + CPU3: cpu@300 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x300>; + enable-method = "psci"; + next-level-cache = <&L2_3>; + #cooling-cells = <0x2>; + + L2_3: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <0x2>; + next-level-cache = <&L3_0>; + }; + + L1_I_300: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x8800>; + 
}; + + L1_D_300: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x9000>; + }; + + L2_TLB_300: l2-tlb { + qcom,dump-size = <0x5000>; + }; + }; + + CPU4: cpu@400 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x400>; + enable-method = "psci"; + next-level-cache = <&L2_4>; + #cooling-cells = <0x2>; + + L2_4: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <0x2>; + next-level-cache = <&L3_0>; + }; + + L1_I_400: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x8800>; + }; + + L1_D_400: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x9000>; + }; + + L2_TLB_400: l2-tlb { + qcom,dump-size = <0x5000>; + }; + }; + + CPU5: cpu@500 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x500>; + enable-method = "psci"; + next-level-cache = <&L2_5>; + #cooling-cells = <0x2>; + + L2_5: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <0x2>; + next-level-cache = <&L3_0>; + }; + + L1_I_500: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x8800>; + }; + + L1_D_500: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x9000>; + }; + + L2_TLB_500: l2-tlb { + qcom,dump-size = <0x5000>; + }; + }; + + CPU6: cpu@600 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x600>; + enable-method = "psci"; + next-level-cache = <&L2_6>; + #cooling-cells = <0x2>; + + L2_6: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <0x2>; + next-level-cache = <&L3_0>; + }; + + L1_I_600: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x8800>; + }; + + L1_D_600: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x9000>; + }; + + L2_TLB_600: l2-tlb { + qcom,dump-size = <0x5000>; + }; + }; + + CPU7: cpu@700 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x700>; + enable-method = "psci"; + next-level-cache = <&L2_7>; + #cooling-cells = <0x2>; + + L2_7: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <0x2>; + next-level-cache = <&L3_0>; + }; + + L1_I_700: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x8800>; + }; + + L1_D_700: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x9000>; + }; + + L2_TLB_700: l2-tlb { + qcom,dump-size = <0x5000>; + }; + }; }; soc: soc { } ; @@ -154,6 +357,25 @@ 0x18100 0x18100 0x18100 0x18100>; }; + qcom,msm-imem@146bf000 { + compatible = "qcom,msm-imem"; + reg = <0x146bf000 0x1000>; + ranges = <0x0 0x146bf000 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + dload_type@1c { + compatible = "qcom,msm-imem-dload-type"; + reg = <0x1c 0x4>; + }; + }; + + restart@c264000 { + compatible = "qcom,pshold"; + reg = <0xc264000 0x4>, + <0x1fd3000 0x4>; + reg-names = "pshold-base", "tcsr-boot-misc-detect"; + }; + apps_rsc: mailbox@18220000 { compatible = "qcom,tcs-drv"; status="ok"; diff --git a/arch/arm64/boot/dts/qcom/sa8155-v2-adp-air-display-lxc-shd.dtsi b/arch/arm64/boot/dts/qcom/sa8155-v2-adp-air-display-lxc-shd.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..6e456fa3870e3ffc21f7de874904449c45d2cbe1 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa8155-v2-adp-air-display-lxc-shd.dtsi @@ -0,0 +1,114 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + sde_sh_base0: qcom,shared-display-base@0 { + qcom,shared-display-base-intf = <3>; + qcom,shared-display-base-mst; + }; + + sde_sh_base1: qcom,shared-display-base@1 { + qcom,shared-display-base-intf = <1>; + }; + + sde_sh_base2: qcom,shared-display-base@2 { + qcom,shared-display-base-intf = <2>; + }; + + sde_sh_base3: qcom,shared-display-base@3 { + qcom,shared-display-base-intf = <0>; + qcom,shared-display-base-mst; + }; + + sde_sh_base4: qcom,shared-display-base@4 { + qcom,shared-display-base-intf = <0>; + }; + + sde_sh0: qcom,shared-display@0 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base0>; + qcom,blend-stage-range = <0 4>; + qcom,shared-display-name = "shared-disp-0"; + qcom,display-type = "primary"; + }; + + sde_sh1: qcom,shared-display@1 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base1>; + qcom,blend-stage-range = <0 4>; + qcom,display-type = "primary"; + qcom,shared-display-name = "shared-disp-1"; + }; + + sde_sh2: qcom,shared-display@2 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base2>; + qcom,blend-stage-range = <0 4>; + qcom,display-type = "primary"; + qcom,shared-display-name = "shared-disp-2"; + }; + + sde_sh3: qcom,shared-display@3 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base3>; + qcom,blend-stage-range = <0 4>; + qcom,shared-display-name = "shared-disp-3"; + qcom,display-type = "primary"; + }; + + sde_sh4: qcom,shared-display@4 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base4>; + qcom,blend-stage-range = <0 4>; + qcom,shared-display-name = "shared-disp-4"; + qcom,display-type = "primary"; + }; + + sde_card1: qcom,sde-kms-lease@0 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm"; + qcom,lease-connectors = "shared-disp-1"; + qcom,lease-planes = "plane-0", "plane-1", + "plane-12", "plane-13"; + }; + + sde_card2: qcom,sde-kms-lease@1 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm"; + qcom,lease-connectors = "shared-disp-2"; + qcom,lease-planes = "plane-2", "plane-3", + "plane-14", "plane-15"; + }; + + sde_card3: qcom,sde-kms-lease@2 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm"; + qcom,lease-connectors = "shared-disp-3","shared-disp-4"; + qcom,lease-planes = "plane-4", "plane-5", + "plane-8", "plane-9"; + }; + + sde_card4: qcom,sde-kms-lease@3 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm"; + qcom,lease-connectors = "shared-disp-0"; + qcom,lease-planes = "plane-6", "plane-7", + "plane-10", "plane-11"; + }; +}; + +&mdss_mdp { + qcom,sde-reg-dma-version = <0>; + connectors = <&dsi_dp1 &dsi_dp2 &sde_dp &sde_wb + &sde_sh0 &sde_sh1 &sde_sh2 &sde_sh3 &sde_sh4 + &sde_card1 &sde_card2 &sde_card3 &sde_card4>; +}; diff --git a/arch/arm64/boot/dts/qcom/sa8155-v2-adp-air-lxc-overlay.dts b/arch/arm64/boot/dts/qcom/sa8155-v2-adp-air-lxc-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..f9b084ca57d668d7d0efba35fe770e8372233bf6 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa8155-v2-adp-air-lxc-overlay.dts @@ -0,0 +1,25 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include "sa8155-adp-common.dtsi" +#include "sa8155-adp-star-display.dtsi" +#include "sa8155-v2-adp-air-display-lxc-shd.dtsi" + +/ { + model = "ADP-AIR"; + compatible = "qcom,sa8155-v2-adp-air", "qcom,sa8155", + "qcom,adp-air"; + qcom,board-id = <0X01000019 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sa8155-v2-adp-air-overlay.dts b/arch/arm64/boot/dts/qcom/sa8155-v2-adp-air-overlay.dts index 8c51bb3a1019ba4294b97f03e5b42d8b0af13fc5..231a3bd56b53bdc983de885d6c087816936d5ce6 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-v2-adp-air-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sa8155-v2-adp-air-overlay.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -15,6 +15,7 @@ #include "sa8155-adp-common.dtsi" #include "sa8155-adp-star-display.dtsi" +#include "sa8155-adp-star-display-shd.dtsi" / { model = "ADP-AIR"; diff --git a/arch/arm64/boot/dts/qcom/sa8155-vm-la.dtsi b/arch/arm64/boot/dts/qcom/sa8155-vm-la.dtsi index 9c965a0fe57bb987d5496e92b6a4b68ba835a48f..611ade1e8ebd286c70d1859183786b143fd7ac78 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-vm-la.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-vm-la.dtsi @@ -104,9 +104,9 @@ qcom,use-ipa-tethering-bridge; qcom,use-64-bit-dma-mask; qcom,arm-smmu; - qcom,smmu-fast-map; qcom,use-ipa-pm; qcom,bandwidth-vote-for-ipa; + qcom,register-collection-on-crash; qcom,manual-fw-load; qcom,msm-bus,name = "ipa"; qcom,msm-bus,num-cases = <5>; @@ -152,13 +152,12 @@ compatible = "qcom,ipa-smmu-ap-cb"; iommus = <&apps_smmu 0x520 0x0>; qcom,iova-mapping = <0x20000000 0x40000000>; - qcom,smmu-s1-bypass; + qcom,smmu-fast-map; }; ipa_smmu_wlan: ipa_smmu_wlan { compatible = "qcom,ipa-smmu-wlan-cb"; iommus = <&apps_smmu 0x521 0x0>; - qcom,smmu-s1-bypass; qcom,additional-mapping = /* ipa-uc ram */ <0x1E60000 0x1E60000 0x80000>; @@ -167,7 +166,6 @@ ipa_smmu_uc: ipa_smmu_uc { compatible = "qcom,ipa-smmu-uc-cb"; iommus = <&apps_smmu 0x522 0x0>; - qcom,smmu-s1-bypass; qcom,iova-mapping = <0x40400000 0x1FC00000>; }; diff --git a/arch/arm64/boot/dts/qcom/sa8155-vm-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sa8155-vm-qupv3.dtsi index 2b7dffe4a33e20a98e4286e4f5a2908d74eaf133..d1d7feac1d0facb7777dc53c21dcf36d74fb7436 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-vm-qupv3.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-vm-qupv3.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -317,6 +317,28 @@ status = "disabled"; }; + /* 4-wire UART */ + qupv3_se4_4uart: qcom,qup_uart@890000 { + compatible = "qcom,msm-geni-serial-hs"; + reg = <0x890000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_virt GCC_QUPV3_WRAP0_S4_CLK>, + <&clock_virt GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_virt GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "active", "sleep"; + pinctrl-0 = <&qupv3_se4_default_ctsrtsrx>, + <&qupv3_se4_default_tx>; + pinctrl-1 = <&qupv3_se4_ctsrx>, <&qupv3_se4_rts>, + <&qupv3_se4_tx>; + pinctrl-2 = <&qupv3_se4_ctsrx>, <&qupv3_se4_rts>, + <&qupv3_se4_tx>; + interrupts = ; + qcom,wrapper-core = <&qupv3_0>; + qcom,wakeup-byte = <0xFD>; + status = "disabled"; + }; + /* QUPv3 North & East Instances * North 0 : SE 8 * North 1 : SE 9 diff --git a/arch/arm64/boot/dts/qcom/sa8155-vm.dtsi b/arch/arm64/boot/dts/qcom/sa8155-vm.dtsi index 009d2c30df2900706aea5f500f2b1371a60bda66..7d145642da32943ae18b97be79304c3abb3746d3 100644 --- a/arch/arm64/boot/dts/qcom/sa8155-vm.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-vm.dtsi @@ -142,6 +142,10 @@ interrupts = ; number-of-rate-detectors = <2>; rate-detector-interfaces = <0 1>; + iommus = <&apps_smmu 0x1B5C 0x1>, + <&apps_smmu 0x1B5E 0x0>; + qcom,smmu-s1-bypass; + qcom,iova-mapping = <0x0 0xFFFFFFFF>; sdr0: qcom,hs0_i2s { compatible = "qcom,hsi2s-interface"; diff --git a/arch/arm64/boot/dts/qcom/sa8155.dtsi b/arch/arm64/boot/dts/qcom/sa8155.dtsi index 18f6d27a9d83810a14f52524aee0f0c05eb30708..7ecaf4e1c330f15a9a366c56cc77a0b4dada1a8a 100644 --- a/arch/arm64/boot/dts/qcom/sa8155.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155.dtsi @@ -411,6 +411,10 @@ interrupts = ; number-of-rate-detectors = <2>; rate-detector-interfaces = <0 1>; + iommus = <&apps_smmu 0x1B5C 0x1>, + <&apps_smmu 0x1B5E 0x0>; + qcom,smmu-s1-bypass; + qcom,iova-mapping = <0x0 0xFFFFFFFF>; sdr0: qcom,hs0_i2s { compatible = "qcom,hsi2s-interface"; @@ -422,9 +426,6 @@ pinctrl-1 = <&hs1_i2s_mclk_sleep &hs1_i2s_sck_sleep &hs1_i2s_ws_sleep &hs1_i2s_data0_sleep &hs1_i2s_data1_sleep>; - iommus = <&apps_smmu 0x1B5C 0x0>; - qcom,smmu-s1-bypass; - qcom,iova-mapping = <0x0 0xFFFFFFFF>; bit-clock-hz = <12288000>; data-buffer-ms = <10>; bit-depth = <32>; @@ -454,9 +455,6 @@ pinctrl-1 = <&hs2_i2s_mclk_sleep &hs2_i2s_sck_sleep &hs2_i2s_ws_sleep &hs2_i2s_data0_sleep &hs2_i2s_data1_sleep>; - iommus = <&apps_smmu 0x1B5D 0x0>; - qcom,smmu-s1-bypass; - qcom,iova-mapping = <0x0 0xFFFFFFFF>; bit-clock-hz = <12288000>; data-buffer-ms = <10>; bit-depth = <32>; @@ -486,9 +484,6 @@ pinctrl-1 = <&hs3_i2s_mclk_sleep &hs3_i2s_sck_sleep &hs3_i2s_ws_sleep &hs3_i2s_data0_sleep &hs3_i2s_data1_sleep>; - iommus = <&apps_smmu 0x1B5E 0x0>; - qcom,smmu-s1-bypass; - qcom,iova-mapping = <0x0 0xFFFFFFFF>; bit-clock-hz = <12288000>; data-buffer-ms = <10>; bit-depth = <32>; @@ -761,6 +756,10 @@ status="disabled"; }; +&kgsl_msm_iommu { + qcom,secure-size = <0x20000000>; /* 512MB */ +}; + #include "sa8155-audio.dtsi" #include "sa8155-camera.dtsi" #include "sa8155-camera-sensor.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sa8155p-adp-star-overlay.dts b/arch/arm64/boot/dts/qcom/sa8155p-adp-star-overlay.dts index 6c8c6688da1a511a4f3ea6291be5a5df90d5155e..0f63b3a4f05f4761fb1f566b0ecf75a09343652d 100644 --- a/arch/arm64/boot/dts/qcom/sa8155p-adp-star-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sa8155p-adp-star-overlay.dts @@ -1,4 +1,4 
@@ -/* Copyright (c) 2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -16,6 +16,7 @@ #include #include "sa8155-adp-star.dtsi" +#include "sa8155-adp-star-display-shd.dtsi" / { model = "ADP-STAR"; diff --git a/arch/arm64/boot/dts/qcom/sa8155p-v2-adp-air-lxc-overlay.dts b/arch/arm64/boot/dts/qcom/sa8155p-v2-adp-air-lxc-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..b6dc2907436952559a59fd4fcff82c1480a8181b --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa8155p-v2-adp-air-lxc-overlay.dts @@ -0,0 +1,25 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include "sa8155-adp-common.dtsi" +#include "sa8155-adp-star-display.dtsi" +#include "sa8155-v2-adp-air-display-lxc-shd.dtsi" + +/ { + model = "ADP-AIR"; + compatible = "qcom,sa8155p-v2-adp-air", "qcom,sa8155p", + "qcom,adp-air"; + qcom,board-id = <0x01000019 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sa8155p-v2-adp-air-overlay.dts b/arch/arm64/boot/dts/qcom/sa8155p-v2-adp-air-overlay.dts index c89b831b1d855e5dbc0d6c173460675a21c1c899..fa82c797ad30cbf41aae2f393c9d88a28a708d1a 100644 --- a/arch/arm64/boot/dts/qcom/sa8155p-v2-adp-air-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sa8155p-v2-adp-air-overlay.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -15,6 +15,7 @@ #include "sa8155-adp-common.dtsi" #include "sa8155-adp-star-display.dtsi" +#include "sa8155-adp-star-display-shd.dtsi" / { model = "ADP-AIR"; diff --git a/arch/arm64/boot/dts/qcom/sa8195-capture.dtsi b/arch/arm64/boot/dts/qcom/sa8195-capture.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..47b31c2b23b084b35dd9721e7d0cfea7e4a8ee2c --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa8195-capture.dtsi @@ -0,0 +1,577 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "skeleton64.dtsi" +#include +#include +#include +#include +#include +#include +#include +#include + +/ { + + model = "Qualcomm Technologies, Inc. 
SDMSHRIKE"; + compatible = "qcom,sdmshrike"; + qcom,msm-name = "SDMSHRIKE"; + qcom,msm-id = <340 0x10000>; + interrupt-parent = <&intc>; + cpus { + #address-cells = <0x2>; + #size-cells = <0x0>; + + CPU0: cpu@0 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + next-level-cache = <&L2_0>; + #cooling-cells = <0x2>; + + L2_0: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <0x2>; + next-level-cache = <&L3_0>; + L3_0: l3-cache { + compatible = "arm,arch-cache"; + cache-level = <0x3>; + }; + }; + + L1_I_0: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + + L1_D_0: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + }; + + CPU1: cpu@100 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + next-level-cache = <&L2_1>; + #cooling-cells = <2>; + L2_1: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <2>; + next-level-cache = <&L3_0>; + }; + L1_I_100: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + L1_D_100: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + }; + + CPU2: cpu@200 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + next-level-cache = <&L2_2>; + #cooling-cells = <2>; + L2_2: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <2>; + next-level-cache = <&L3_0>; + }; + L1_I_200: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + L1_D_200: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + }; + + CPU3: cpu@300 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x300>; + enable-method = "psci"; + next-level-cache = <&L2_3>; + #cooling-cells = <2>; + L2_3: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <2>; + next-level-cache = <&L3_0>; + }; + L1_I_300: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + L1_D_300: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + }; + + CPU4: cpu@400 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x400>; + enable-method = "psci"; + next-level-cache = <&L2_4>; + #cooling-cells = <2>; + L2_4: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <2>; + next-level-cache = <&L3_0>; + }; + L1_I_400: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x14000>; + }; + L1_D_400: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x14000>; + }; + }; + + CPU5: cpu@500 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x500>; + enable-method = "psci"; + next-level-cache = <&L2_5>; + #cooling-cells = <2>; + L2_5: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <2>; + next-level-cache = <&L3_0>; + }; + L1_I_500: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x14000>; + }; + L1_D_500: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x14000>; + }; + }; + + CPU6: cpu@600 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x600>; + enable-method = "psci"; + next-level-cache = <&L2_6>; + #cooling-cells = <2>; + L2_6: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <2>; + next-level-cache = <&L3_0>; + }; + L1_I_600: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x14000>; + }; + L1_D_600: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x14000>; + }; + }; + 
+ CPU7: cpu@700 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x700>; + enable-method = "psci"; + next-level-cache = <&L2_7>; + #cooling-cells = <2>; + L2_7: l2-cache { + compatible = "arm,arch-cache"; + cache-level = <2>; + next-level-cache = <&L3_0>; + }; + L1_I_700: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x14000>; + }; + L1_D_700: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x14000>; + }; + }; + }; + + soc: soc { } ; + chosen { + bootargs = "rcupdate.rcu_expedited=1 rcu_nocbs=0-7"; + }; + + aliases { + serial0 = &qupv3_se12_2uart; + serial1 = "/soc/qcom,qup_uart@0xa90000"; + }; + + memory { + device_type = "memory"; + reg = <0x1 0x40000000 0x0 0x20000000>; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + }; +}; + +#include "sdmshrike-gdsc.dtsi" + +&soc { + status = "ok"; + #address-cells = <0x1>; + #size-cells = <0x1>; + ranges = <0x0 0x0 0x0 0xffffffff>; + compatible = "simple-bus"; + + intc: interrupt-controller@17a00000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + interrupt-controller; + #redistributor-regions = <1>; + redistributor-stride = <0x0 0x20000>; + reg = <0x17a00000 0x10000>, /* GICD */ + <0x17a60000 0x100000>; /* GICR * 8 */ + interrupts = <1 9 4>; + interrupt-parent = <&intc>; + }; + + clock_gcc: qcom,gcc@100000 { + compatible = "qcom,gcc-sdmshrike", "syscon"; + reg = <0x100000 0x1f0000>; + reg-names = "cc_base"; + vdd_cx-supply = <&VDD_CX_LEVEL>; + vdd_cx_ao-supply = <&VDD_CX_LEVEL_AO>; + vdd_mm-supply = <&VDD_MMCX_LEVEL>; + #clock-cells = <0x1>; + #reset-cells = <0x1>; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = <1 1 0xf08>, + <1 2 0xf08>, + <1 3 0xf08>, + <1 0 0xf08>; + clock-frequency = <19200000>; + }; + + timer@0x17c20000 { + #address-cells = <0x1>; + #size-cells = <0x1>; + ranges; + compatible = "arm,armv7-timer-mem"; + reg = <0x17c20000 0x1000>; + clock-frequency = <19200000>; + + frame@0x17c21000 { + frame-number = <0>; + interrupts = <0 7 0x4>, + <0 6 0x4>; + reg = <0x17c21000 0x1000>, + <0x17c22000 0x1000>; + }; + }; + + wdog: qcom,wdt@17c10000 { + compatible = "qcom,msm-watchdog"; + reg = <0x17c10000 0x1000>; + reg-names = "wdt-base"; + interrupts = <0 0 0>, <0 1 0>; + qcom,bark-time = <11000>; + qcom,pet-time = <9360>; + qcom,ipi-ping; + qcom,wakeup-enable; + }; + + + qcom,msm-imem@146bf000 { + compatible = "qcom,msm-imem"; + reg = <0x146bf000 0x1000>; + ranges = <0x0 0x146bf000 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + dload_type@1c { + compatible = "qcom,msm-imem-dload-type"; + reg = <0x1c 0x4>; + }; + }; + + restart@c264000 { + compatible = "qcom,pshold"; + reg = <0xc264000 0x4>, + <0x1fd3000 0x4>; + reg-names = "pshold-base", "tcsr-boot-misc-detect"; + }; + + apps_rsc: mailbox@18220000 { + compatible = "qcom,tcs-drv"; + status="ok"; + label = "apps_rsc"; + reg = <0x18220000 0x100>, <0x18220d00 0x3000>; + interrupts = ; + #mbox-cells = <1>; + qcom,drv-id = <2>; + qcom,tcs-config = , + , + , + ; + }; + + clock_rpmh: qcom,rpmhclk { + compatible = "qcom,rpmh-clk-sdmshrike"; + mboxes = <&apps_rsc 0>; + mbox-names = "apps"; + #clock-cells = <1>; + }; + + disp_rsc: mailbox@af20000 { + compatible = "qcom,tcs-drv"; + label = "display_rsc"; + reg = <0xaf20000 0x100>, <0xaf21c00 0x3000>; + interrupts = <0 129 0>; + #mbox-cells = <1>; + qcom,drv-id = <0>; + qcom,tcs-config = , + , + , + ; + status = "disabled"; + }; + + qmp_aop: qcom,qmp-aop@c300000 { + compatible = "qcom,qmp-mbox"; + reg = <0xc300000 0x1000>, <0x17c0000C 0x4>; + 
reg-names = "msgram", "irq-reg-base"; + qcom,irq-mask = <0x1>; + interrupts = ; + + label = "aop"; + qcom,early-boot; + priority = <0>; + mbox-desc-offset = <0x0>; + #mbox-cells = <1>; + }; + + cmd_db: qcom,cmd-db@c3f000c { + compatible = "qcom,cmd-db"; + reg = <0xc3f000c 8>; + }; + + qupv3_1: qcom,qupv3_1_geni_se@ac0000 { + compatible = "qcom,qupv3-geni-se"; + reg = <0xac0000 0x6000>; + qcom,bus-mas-id = ; + qcom,bus-slv-id = ; + qcom,iommu-s1-bypass; + + iommu_qupv3_1_geni_se_cb: qcom,iommu_qupv3_1_geni_se_cb { + compatible = "qcom,qupv3-geni-se-cb"; + iommus = <&apps_smmu 0x603 0x0>; + }; + + }; + /* 2-wire UART */ + + /* Debug UART Instance for CDP/MTP platform */ + qupv3_se12_2uart: qcom,qup_uart@0xa90000 { + compatible = "qcom,msm-geni-console"; + reg = <0xa90000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se12_2uart_active>; + pinctrl-1 = <&qupv3_se12_2uart_sleep>; + interrupts = ; + qcom,wrapper-core = <&qupv3_1>; + status = "ok"; + }; + + apps_smmu: apps-smmu@0x15000000 { + compatible = "qcom,qsmmu-v500"; + reg = <0x15000000 0x100000>, + <0x15182000 0x20>; + reg-names = "base", "tcu-base"; + #iommu-cells = <2>; + qcom,skip-init; + qcom,use-3-lvl-tables; + qcom,disable-atos; + #global-interrupts = <1>; + #size-cells = <1>; + #address-cells = <1>; + ranges; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; + }; + +}; + + + +#include "sdmshrike-regulators.dtsi" +#include "sdmshrike-pinctrl.dtsi" +#include "sdmshrike-bus.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sa8195-vm-lv-cnss-lxc.dtsi b/arch/arm64/boot/dts/qcom/sa8195-vm-lv-cnss-lxc.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..ea8a3e9f52eac9044a328c694bdd0613a6fa4177 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa8195-vm-lv-cnss-lxc.dtsi @@ -0,0 +1,601 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&pcie2_msi { + status = "ok"; +}; + +&pcie2 { + status = "ok"; +}; + +&cnss_pins { + + cnss_wlan_en_active2: cnss_wlan_en_active2 { + mux { + pins = "gpio170"; + function = "gpio"; + }; + + config { + pins = "gpio170"; + drive-strength = <16>; + output-high; + bias-pull-up; + }; + }; + cnss_wlan_en_sleep2: cnss_wlan_en_sleep2 { + mux { + pins = "gpio170"; + function = "gpio"; + }; + + config { + pins = "gpio170"; + drive-strength = <2>; + output-low; + bias-pull-down; + }; + }; +}; + +&cnss_pcie { + status = "disabled"; +}; + +&soc { + /* Support Dual Hastings case*/ + cnss_pcie0: qcom,cnss-qca-converged0 { + compatible = "qcom,cnss-qca-converged"; + + qcom,converged-dt; + qcom,wlan-rc-num = <0>; + qcom,bus-type=<0>; + qcom,qrtr_node_id = <0x10>; + qcom,notify-modem-status; + qcom,msm-bus,name = "msm-cnss"; + qcom,msm-bus,num-cases = <6>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + /* no vote */ + , + /* idle: 0-18 Mbps, ddr freq: 100 MHz */ + , + /* low: 18-60 Mbps, ddr freq: 200 MHz*/ + , + /* medium: 60-240 Mbps, ddr freq: 451.2 MHz */ + , + /* high: 240 - 800 Mbps, ddr freq: 451.2 MHz */ + , + /* very high: 800 - 1400 Mbps, ddr freq: 1555.2 MHz */ + ; + + #address-cells=<1>; + #size-cells=<1>; + ranges = <0x10000000 0x10000000 0x10000000>, + <0x20000000 0x20000000 0x10000>, + <0xa0000000 0xa0000000 0x10000000>, + <0xb0000000 0xb0000000 0x10000>; + + vdd-wlan-ctrl1-supply = <&vreg_conn_pa>; + vdd-wlan-ctrl2-supply = <&vreg_conn_1p8>; + vdd-wlan-supply = <&vreg_wlan>; + vdd-wlan-aon-supply = <&pm8195_1_s5>; + vdd-wlan-rfa1-supply = <&pm8195_1_s2>; + vdd-wlan-rfa2-supply = <&pm8195_2_s5>; + vdd-wlan-rfa3-supply = <&pm8195_2_l7>; + + wlan_vregs = "vdd-wlan-ctrl1", "vdd-wlan-ctrl2"; + qcom,vdd-wlan-ctrl1-info = <0 0 0 0>; + qcom,vdd-wlan-ctrl2-info = <0 0 0 0>; + wlan-en-gpio = <&tlmm 169 0>; + pinctrl-names = "wlan_en_active", "wlan_en_sleep"; + pinctrl-0 = <&cnss_wlan_en_active>; + pinctrl-1 = <&cnss_wlan_en_sleep>; + + chip_cfg@0 { + reg = <0x10000000 0x10000000>, + <0x20000000 0x10000>; + reg-names = "smmu_iova_base", "smmu_iova_ipa"; + + supported-ids = <0x003e>; + wlan_vregs = "vdd-wlan"; + qcom,vdd-wlan-info = <0 0 0 10>; + + qcom,smmu-s1-enable; + qcom,wlan-ramdump-dynamic = <0x200000>; + }; + + chip_cfg@1 { + reg = <0xa0000000 0x10000000>, + <0xb0000000 0x10000>; + reg-names = "smmu_iova_base", "smmu_iova_ipa"; + + supported-ids = <0x1101>; + wlan_vregs = "vdd-wlan-aon", "vdd-wlan-rfa1", + "vdd-wlan-rfa2", "vdd-wlan-rfa3"; + qcom,vdd-wlan-aon-info = <1000000 1000000 0 0>; + qcom,vdd-wlan-rfa1-info = <1370000 1370000 0 0>; + qcom,vdd-wlan-rfa2-info = <2040000 2040000 0 0>; + qcom,vdd-wlan-rfa3-info = <1900000 1900000 450000 0>; + qcom,wlan-ramdump-dynamic = <0x400000>; + mhi,max-channels = <30>; + mhi,timeout = <10000>; + + mhi_channels { + #address-cells = <1>; + #size-cells = <0>; + mhi_chan@0 { + reg = <0>; + label = "LOOPBACK"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@1 { + reg = <1>; + label = "LOOPBACK"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@4 { + reg = <4>; + label = "DIAG"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@5 { + reg = <5>; + label = "DIAG"; + mhi,num-elements = <32>; + mhi,event-ring = 
<1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@20 { + reg = <20>; + label = "IPCR"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <1>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + mhi,auto-start; + }; + + mhi_chan@21 { + reg = <21>; + label = "IPCR"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + mhi,auto-queue; + mhi,auto-start; + }; + }; + + mhi_events { + mhi_event@0 { + mhi,num-elements = <32>; + mhi,intmod = <1>; + mhi,msi = <1>; + mhi,priority = <1>; + mhi,brstmode = <2>; + mhi,data-type = <1>; + }; + + mhi_event@1 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <2>; + mhi,priority = <1>; + mhi,brstmode = <2>; + }; + }; + }; + + chip_cfg@2 { + reg = <0xa0000000 0x10000000>, + <0xb0000000 0x10000>; + reg-names = "smmu_iova_base", "smmu_iova_ipa"; + + supported-ids = <0x1102>; + wlan_vregs = "vdd-wlan-aon", "vdd-wlan-rfa1", + "vdd-wlan-rfa2", "vdd-wlan-rfa3"; + qcom,vdd-wlan-aon-info = <1000000 1000000 0 0>; + qcom,vdd-wlan-rfa1-info = <1370000 1370000 0 0>; + qcom,vdd-wlan-rfa2-info = <2040000 2040000 0 0>; + qcom,vdd-wlan-rfa3-info = <1900000 1900000 0 0>; + + qcom,wlan-ramdump-dynamic = <0x300000>; + mhi,max-channels = <30>; + mhi,timeout = <10000>; + mhi,ee = <0x3>, <0x4>; + mhi,ee-names = "SBL", "RDDM"; + mhi,bhie-offset = <0x0324>; + + mhi_channels { + #address-cells = <1>; + #size-cells = <0>; + mhi_chan@0 { + reg = <0>; + label = "LOOPBACK"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@1 { + reg = <1>; + label = "LOOPBACK"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@4 { + reg = <4>; + label = "DIAG"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@5 { + reg = <5>; + label = "DIAG"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@16 { + reg = <16>; + label = "IPCR"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <1>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + mhi,auto-start; + }; + + mhi_chan@17 { + reg = <17>; + label = "IPCR"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + mhi,auto-queue; + mhi,auto-start; + }; + }; + + mhi_events { + mhi_event@0 { + mhi,num-elements = <32>; + mhi,intmod = <1>; + mhi,msi = <1>; + mhi,priority = <1>; + mhi,brstmode = <2>; + mhi,data-type = <1>; + }; + + mhi_event@1 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <2>; + mhi,priority = <1>; + mhi,brstmode = <2>; + }; + }; + }; + }; + + cnss_pcie2: qcom,cnss-qca-converged2 { + compatible = "qcom,cnss-qca-converged"; + + qcom,converged-dt; + qcom,wlan-rc-num = <2>; + qcom,qrtr_node_id = <0x20>; + qcom,bus-type=<0>; + qcom,notify-modem-status; + qcom,msm-bus,name = "msm-cnss"; + qcom,msm-bus,num-cases = <6>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + /* no vote */ + , + /* idle: 0-18 Mbps, ddr freq: 100 MHz */ + , + /* low: 18-60 Mbps, ddr freq: 200 
MHz*/ + , + /* medium: 60-240 Mbps, ddr freq: 451.2 MHz */ + , + /* high: 240 - 800 Mbps, ddr freq: 451.2 MHz */ + , + /* very high: 800 - 1400 Mbps, ddr freq: 1555.2 MHz */ + ; + + #address-cells=<1>; + #size-cells=<1>; + ranges = <0xc0000000 0xc0000000 0x10000000>, + <0xd0000000 0xd0000000 0x10000>; + + vdd-wlan-ctrl1-supply = <&vreg_conn_pa>; + vdd-wlan-ctrl2-supply = <&vreg_conn_1p8>; + vdd-wlan-supply = <&vreg_wlan>; + vdd-wlan-aon-supply = <&pm8195_1_s5>; + vdd-wlan-rfa1-supply = <&pm8195_1_s2>; + vdd-wlan-rfa2-supply = <&pm8195_2_s5>; + vdd-wlan-rfa3-supply = <&pm8195_2_l7>; + + wlan_vregs = "vdd-wlan-ctrl1", "vdd-wlan-ctrl2"; + qcom,vdd-wlan-ctrl1-info = <0 0 0 0>; + qcom,vdd-wlan-ctrl2-info = <0 0 0 0>; + wlan-en-gpio = <&tlmm 170 0>; + pinctrl-names = "wlan_en_active", "wlan_en_sleep"; + pinctrl-0 = <&cnss_wlan_en_active2>; + pinctrl-1 = <&cnss_wlan_en_sleep2>; + + chip_cfg@0 { + reg = <0xc0000000 0x10000000>, + <0xd0000000 0x10000>; + reg-names = "smmu_iova_base", "smmu_iova_ipa"; + + supported-ids = <0x003e>; + wlan_vregs = "vdd-wlan"; + qcom,vdd-wlan-info = <0 0 0 10>; + + qcom,smmu-s1-enable; + qcom,wlan-ramdump-dynamic = <0x200000>; + }; + + chip_cfg@1 { + reg = <0xc0000000 0x10000000>, + <0xd0000000 0x10000>; + reg-names = "smmu_iova_base", "smmu_iova_ipa"; + + supported-ids = <0x1101>; + wlan_vregs = "vdd-wlan-aon", "vdd-wlan-rfa1", + "vdd-wlan-rfa2", "vdd-wlan-rfa3"; + qcom,vdd-wlan-aon-info = <1000000 1000000 0 0>; + qcom,vdd-wlan-rfa1-info = <1370000 1370000 0 0>; + qcom,vdd-wlan-rfa2-info = <2040000 2040000 0 0>; + qcom,vdd-wlan-rfa3-info = <1900000 1900000 450000 0>; + qcom,wlan-ramdump-dynamic = <0x400000>; + mhi,max-channels = <30>; + mhi,timeout = <10000>; + + mhi_channels { + #address-cells = <1>; + #size-cells = <0>; + mhi_chan@0 { + reg = <0>; + label = "LOOPBACK"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@1 { + reg = <1>; + label = "LOOPBACK"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@20 { + reg = <20>; + label = "IPCR"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <1>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + mhi,auto-start; + }; + + mhi_chan@21 { + reg = <21>; + label = "IPCR"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + mhi,auto-queue; + mhi,auto-start; + }; + }; + + mhi_events { + mhi_event@0 { + mhi,num-elements = <32>; + mhi,intmod = <1>; + mhi,msi = <1>; + mhi,priority = <1>; + mhi,brstmode = <2>; + mhi,data-type = <1>; + }; + + mhi_event@1 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <2>; + mhi,priority = <1>; + mhi,brstmode = <2>; + }; + }; + }; + + chip_cfg@2 { + reg = <0xa0000000 0x10000000>, + <0xb0000000 0x10000>; + reg-names = "smmu_iova_base", "smmu_iova_ipa"; + + supported-ids = <0x1102>; + wlan_vregs = "vdd-wlan-aon", "vdd-wlan-rfa1", + "vdd-wlan-rfa2", "vdd-wlan-rfa3"; + qcom,vdd-wlan-aon-info = <1000000 1000000 0 0>; + qcom,vdd-wlan-rfa1-info = <1370000 1370000 0 0>; + qcom,vdd-wlan-rfa2-info = <2040000 2040000 0 0>; + qcom,vdd-wlan-rfa3-info = <1900000 1900000 0 0>; + + qcom,wlan-ramdump-dynamic = <0x300000>; + mhi,max-channels = <30>; + mhi,timeout = <10000>; + mhi,ee = <0x3>, <0x4>; + mhi,ee-names = "SBL", "RDDM"; + mhi,bhie-offset = 
<0x0324>; + + mhi_channels { + #address-cells = <1>; + #size-cells = <0>; + mhi_chan@0 { + reg = <0>; + label = "LOOPBACK"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@1 { + reg = <1>; + label = "LOOPBACK"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@4 { + reg = <4>; + label = "DIAG"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@5 { + reg = <5>; + label = "DIAG"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + }; + + mhi_chan@16 { + reg = <16>; + label = "IPCR"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <1>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + mhi,auto-start; + }; + + mhi_chan@17 { + reg = <17>; + label = "IPCR"; + mhi,num-elements = <32>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <0x14>; + mhi,auto-queue; + mhi,auto-start; + }; + }; + + mhi_events { + mhi_event@0 { + mhi,num-elements = <32>; + mhi,intmod = <1>; + mhi,msi = <1>; + mhi,priority = <1>; + mhi,brstmode = <2>; + mhi,data-type = <1>; + }; + + mhi_event@1 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <2>; + mhi,priority = <1>; + mhi,brstmode = <2>; + }; + }; + }; + }; +}; + diff --git a/arch/arm64/boot/dts/qcom/sa8195-vm-lv-lxc.dtsi b/arch/arm64/boot/dts/qcom/sa8195-vm-lv-lxc.dtsi index 98522181a703583221e882a65841475923a85c82..edbb28657da0417554bc9112e78c11cf6235273e 100644 --- a/arch/arm64/boot/dts/qcom/sa8195-vm-lv-lxc.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8195-vm-lv-lxc.dtsi @@ -38,6 +38,10 @@ status = "ok"; }; +&qupv3_se4_4uart { + status = "ok"; +}; + &usb0 { status = "ok"; }; @@ -58,3 +62,10 @@ /delete-property/ qcom,client-id; qcom,client-id = "7816"; }; + +&linux_cma { + /delete-property/ size; + size = <0x0 0x3c00000>; +}; + +#include "sa8195-vm-lv-cnss-lxc.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sa8195-vm.dtsi b/arch/arm64/boot/dts/qcom/sa8195-vm.dtsi index 3499ccae476f60be821a8dfdc8ed0379763ab865..4ecd8e89ca31b0e680d1c64a27e2a2a3524ee424 100644 --- a/arch/arm64/boot/dts/qcom/sa8195-vm.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8195-vm.dtsi @@ -125,6 +125,8 @@ aliases { sdhc2 = &sdhc_2; /* SDC2 SD Card slot */ + hsuart0 = &qupv3_se13_4uart; + hsuart1 = &qupv3_se4_4uart; }; }; @@ -138,6 +140,10 @@ interrupts = ; number-of-rate-detectors = <2>; rate-detector-interfaces = <0 1>; + iommus = <&apps_smmu 0x1B5C 0x1>, + <&apps_smmu 0x1B5E 0x0>; + qcom,smmu-s1-bypass; + qcom,iova-mapping = <0x0 0xFFFFFFFF>; sdr0: qcom,hs0_i2s { compatible = "qcom,hsi2s-interface"; diff --git a/arch/arm64/boot/dts/qcom/sa8195p-adp-star-display-shd.dtsi b/arch/arm64/boot/dts/qcom/sa8195p-adp-star-display-shd.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..31085356b6dd299aa27a4f9f063cb44f1ea5fad6 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa8195p-adp-star-display-shd.dtsi @@ -0,0 +1,83 @@ +/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + sde_sh_base0: qcom,shared-display-base@0 { + qcom,shared-display-base-intf = <1>; + }; + + sde_sh0: qcom,shared-display@0 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base0>; + qcom,blend-stage-range = <0 8>; + qcom,shared-display-name = "shared-disp-0"; + qcom,display-type = "primary"; + }; + + sde_sh1: qcom,shared-display@1 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base0>; + qcom,blend-stage-range = <8 1>; + qcom,shared-display-name = "shared-disp-1"; + }; + + sde_sh2: qcom,shared-display@2 { + compatible = "qcom,shared-display"; + qcom,shared-display-base = <&sde_sh_base0>; + qcom,blend-stage-range = <9 1>; + qcom,shared-display-name = "shared-disp-2"; + }; + + sde_shp: qcom,sde-shared-plane { + compatible = "qcom,sde-shared-plane"; + qcom,add-planes { + plane@0 { + qcom,plane-name = "plane-4-splash"; + qcom,plane-parent = "plane-4"; + qcom,plane-init-active; + }; + plane@1 { + qcom,plane-name = "plane-3-splash"; + qcom,plane-parent = "plane-3"; + qcom,plane-init-active; + qcom,plane-init-handoff; + }; + }; + }; + + sde_card1: qcom,sde-kms-lease@0 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm"; + }; + + sde_card2: qcom,sde-kms-lease@1 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm2"; + qcom,lease-connectors = "shared-disp-2"; + qcom,lease-planes = "plane-4-splash"; + }; + + sde_card3: qcom,sde-kms-lease@2 { + compatible = "qcom,sde-kms-lease"; + qcom,dev-name = "msm_drm3"; + qcom,lease-connectors = "shared-disp-1"; + qcom,lease-planes = "plane-3-splash"; + }; +}; + +&mdss_mdp { + qcom,sde-crtc-num-pref = <8>; + qcom,sde-reg-dma-version = <0>; + connectors = <&dsi_dp1 &dsi_dp2 &sde_dp0 &sde_dp1 &sde_edp &sde_wb + &sde_sh0 &sde_sh1 &sde_sh2 &sde_shp + &sde_card1 &sde_card2 &sde_card3>; +}; diff --git a/arch/arm64/boot/dts/qcom/sa8195p-adp-star-display.dtsi b/arch/arm64/boot/dts/qcom/sa8195p-adp-star-display.dtsi index 451ebeb708e5067dbd882ef6927ef186c7b10bc3..57c3961998fba3c847839915c602c27221cc2c0f 100644 --- a/arch/arm64/boot/dts/qcom/sa8195p-adp-star-display.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8195p-adp-star-display.dtsi @@ -339,78 +339,11 @@ compatible = "qcom,msm-ext-disp-audio-codec-rx"; }; }; - - sde_sh_base0: qcom,shared-display-base@0 { - qcom,shared-display-base-intf = <1>; - }; - - sde_sh0: qcom,shared-display@0 { - compatible = "qcom,shared-display"; - qcom,shared-display-base = <&sde_sh_base0>; - qcom,blend-stage-range = <0 8>; - qcom,shared-display-name = "shared-disp-0"; - qcom,display-type = "primary"; - }; - - sde_sh1: qcom,shared-display@1 { - compatible = "qcom,shared-display"; - qcom,shared-display-base = <&sde_sh_base0>; - qcom,blend-stage-range = <8 1>; - qcom,shared-display-name = "shared-disp-1"; - }; - - sde_sh2: qcom,shared-display@2 { - compatible = "qcom,shared-display"; - qcom,shared-display-base = <&sde_sh_base0>; - qcom,blend-stage-range = <9 1>; - qcom,shared-display-name = "shared-disp-2"; - }; - - sde_shp: qcom,sde-shared-plane { - compatible = "qcom,sde-shared-plane"; - 
qcom,add-planes { - plane@0 { - qcom,plane-name = "plane-4-splash"; - qcom,plane-parent = "plane-4"; - qcom,plane-init-active; - }; - plane@1 { - qcom,plane-name = "plane-3-splash"; - qcom,plane-parent = "plane-3"; - qcom,plane-init-active; - qcom,plane-init-handoff; - }; - }; - }; - - sde_card1: qcom,sde-kms-lease@0 { - compatible = "qcom,sde-kms-lease"; - qcom,dev-name = "msm_drm"; - }; - - sde_card2: qcom,sde-kms-lease@1 { - compatible = "qcom,sde-kms-lease"; - qcom,dev-name = "msm_drm2"; - qcom,lease-connectors = "shared-disp-2"; - qcom,lease-planes = "plane-4-splash"; - }; - - sde_card3: qcom,sde-kms-lease@2 { - compatible = "qcom,sde-kms-lease"; - qcom,dev-name = "msm_drm3"; - qcom,lease-connectors = "shared-disp-1"; - qcom,lease-planes = "plane-3-splash"; - }; }; &mdss_mdp { - qcom,sde-crtc-num-pref = <8>; - qcom,sde-reg-dma-version = <0>; qcom,sde-ctl-display-pref = "primary", "none", "none", "none", "none"; qcom,sde-mixer-display-pref = "primary", "none", "none", "none", "none", "none"; - connectors = <&dsi_dp1 &dsi_dp2 &sde_dp0 &sde_dp1 &sde_edp &sde_wb - &sde_sh0 &sde_sh1 &sde_sh2 &sde_shp - &sde_card1 &sde_card2 &sde_card3>; }; diff --git a/arch/arm64/boot/dts/qcom/sa8195p-adp-star-overlay.dts b/arch/arm64/boot/dts/qcom/sa8195p-adp-star-overlay.dts index d7752bd2ecec8cd292eb9caf7d64ee712baa7cee..ea377a53c89fa5275e26092927b1587a16d1c8a7 100644 --- a/arch/arm64/boot/dts/qcom/sa8195p-adp-star-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sa8195p-adp-star-overlay.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,6 +14,7 @@ /plugin/; #include "sa8195p-adp-star.dtsi" +#include "sa8195p-adp-star-display-shd.dtsi" / { model = "ADP-STAR"; diff --git a/arch/arm64/boot/dts/qcom/sa8195p-camera.dtsi b/arch/arm64/boot/dts/qcom/sa8195p-camera.dtsi index b869b06b6f974063a3cc7c79a839b0d7c1aaff34..14020c1b051268d5e24ef75ce4e05bbe59058260 100644 --- a/arch/arm64/boot/dts/qcom/sa8195p-camera.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8195p-camera.dtsi @@ -1507,7 +1507,6 @@ num-ipe = <2>; num-bps = <1>; icp_pc_en; - ipe_bps_pc_en; status = "ok"; }; diff --git a/arch/arm64/boot/dts/qcom/sa8195p-v2-adp-air-capture.dts b/arch/arm64/boot/dts/qcom/sa8195p-v2-adp-air-capture.dts new file mode 100644 index 0000000000000000000000000000000000000000..0c26fc9bed696555b6f185d7d7290c870063e316 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa8195p-v2-adp-air-capture.dts @@ -0,0 +1,22 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "sa8195-capture.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. 
SA8195P V2 ADP AIR Capture";
+	compatible = "qcom,sa8195p-v2-adp-air", "qcom,sa8195p", "qcom,adp-air";
+	qcom,board-id = <0x02010019 0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sa8195p-v2-adp-air-display-lxc-shd.dtsi b/arch/arm64/boot/dts/qcom/sa8195p-v2-adp-air-display-lxc-shd.dtsi
new file mode 100644
index 0000000000000000000000000000000000000000..1d78f815704e2d481a59bf37a4022cccb46da3f3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sa8195p-v2-adp-air-display-lxc-shd.dtsi
@@ -0,0 +1,101 @@
+/* Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	sde_sh_base0: qcom,shared-display-base@0 {
+		qcom,shared-display-base-intf = <1>;
+	};
+
+	sde_sh_base1: qcom,shared-display-base@1 {
+		qcom,shared-display-base-intf = <2>;
+	};
+
+	sde_sh_base2: qcom,shared-display-base@2 {
+		qcom,shared-display-base-intf = <0>;
+	};
+
+	sde_sh_base3: qcom,shared-display-base@3 {
+		qcom,shared-display-base-intf = <4>;
+	};
+
+	sde_sh0: qcom,shared-display@0 {
+		compatible = "qcom,shared-display";
+		qcom,shared-display-base = <&sde_sh_base0>;
+		qcom,blend-stage-range = <0 4>;
+		qcom,shared-display-name = "shared-disp-0";
+		qcom,display-type = "primary";
+	};
+
+	sde_sh1: qcom,shared-display@1 {
+		compatible = "qcom,shared-display";
+		qcom,shared-display-base = <&sde_sh_base1>;
+		qcom,blend-stage-range = <0 4>;
+		qcom,display-type = "primary";
+		qcom,shared-display-name = "shared-disp-1";
+	};
+
+	sde_sh2: qcom,shared-display@2 {
+		compatible = "qcom,shared-display";
+		qcom,shared-display-base = <&sde_sh_base2>;
+		qcom,blend-stage-range = <0 4>;
+		qcom,display-type = "primary";
+		qcom,shared-display-name = "shared-disp-2";
+	};
+
+	sde_sh3: qcom,shared-display@3 {
+		compatible = "qcom,shared-display";
+		qcom,shared-display-base = <&sde_sh_base3>;
+		qcom,blend-stage-range = <0 4>;
+		qcom,shared-display-name = "shared-disp-3";
+		qcom,display-type = "primary";
+	};
+
+	sde_card1: qcom,sde-kms-lease@0 {
+		compatible = "qcom,sde-kms-lease";
+		qcom,dev-name = "msm_drm";
+		qcom,lease-connectors = "shared-disp-0";
+		qcom,lease-planes = "plane-0", "plane-1",
+				"plane-12","plane-13";
+	};
+
+	sde_card2: qcom,sde-kms-lease@1 {
+		compatible = "qcom,sde-kms-lease";
+		qcom,dev-name = "msm_drm";
+		qcom,lease-connectors = "shared-disp-1";
+		qcom,lease-planes = "plane-2","plane-3",
+				"plane-14","plane-15";
+	};
+
+	sde_card3: qcom,sde-kms-lease@2 {
+		compatible = "qcom,sde-kms-lease";
+		qcom,dev-name = "msm_drm";
+		qcom,lease-connectors = "shared-disp-2";
+		qcom,lease-planes = "plane-4","plane-5",
+				"plane-8","plane-9";
+	};
+
+	sde_card4: qcom,sde-kms-lease@3 {
+		compatible = "qcom,sde-kms-lease";
+		qcom,dev-name = "msm_drm";
+		qcom,lease-connectors = "shared-disp-3";
+		qcom,lease-planes = "plane-6","plane-7",
+				"plane-10","plane-11";
+	};
+};
+
+&mdss_mdp {
+	qcom,sde-crtc-num-pref = <7>;
+	qcom,sde-reg-dma-version = <0>;
+	connectors = <&dsi_dp1 &dsi_dp2 &sde_dp0 &sde_dp1 &sde_edp &sde_wb
+			&sde_sh0 &sde_sh1 &sde_sh2 &sde_sh3
+			&sde_card1 &sde_card2 &sde_card3 &sde_card4>;
+};
diff --git 
a/arch/arm64/boot/dts/qcom/sa8195p-v2-adp-air-lxc-overlay.dts b/arch/arm64/boot/dts/qcom/sa8195p-v2-adp-air-lxc-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..b5bc9b6335ec5a0508caeb2058f51eed1f822ded --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa8195p-v2-adp-air-lxc-overlay.dts @@ -0,0 +1,31 @@ +/* Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include "sa8195p-adp-common.dtsi" +#include "sa8195p-adp-star-display.dtsi" +#include "sa8195p-v2-adp-air-display-lxc-shd.dtsi" + +/ { + model = "ADP-AIR"; + compatible = "qcom,sa8195p-v2-adp-air", "qcom,sa8195p", + "qcom,adp-air"; + qcom,board-id = <0x02010019 0>; +}; + +&qupv3_se0_spi { + can-controller@0 { + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sa8195p-v2-adp-air-overlay.dts b/arch/arm64/boot/dts/qcom/sa8195p-v2-adp-air-overlay.dts index 1f59cbbc5f706705a710bdec50967bf9ef78fe4c..82ca90f6eaa11c1511bce881f5707f82ff7f722c 100644 --- a/arch/arm64/boot/dts/qcom/sa8195p-v2-adp-air-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sa8195p-v2-adp-air-overlay.dts @@ -15,6 +15,7 @@ #include "sa8195p-adp-common.dtsi" #include "sa8195p-adp-star-display.dtsi" +#include "sa8195p-adp-star-display-shd.dtsi" / { model = "ADP-AIR"; diff --git a/arch/arm64/boot/dts/qcom/sa8195p.dtsi b/arch/arm64/boot/dts/qcom/sa8195p.dtsi index 3e31e5d48b2ac383febd5eac85b7b4ec31c3897e..6b49d09e349c4596de6ab28e4626811f3eb5efb8 100644 --- a/arch/arm64/boot/dts/qcom/sa8195p.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8195p.dtsi @@ -46,6 +46,10 @@ interrupts = ; number-of-rate-detectors = <2>; rate-detector-interfaces = <0 1>; + iommus = <&apps_smmu 0x1B5C 0x1>, + <&apps_smmu 0x1B5E 0x0>; + qcom,smmu-s1-bypass; + qcom,iova-mapping = <0x0 0xFFFFFFFF>; sdr0: qcom,hs0_i2s { compatible = "qcom,hsi2s-interface"; @@ -57,9 +61,6 @@ pinctrl-1 = <&hs1_i2s_mclk_sleep &hs1_i2s_sck_sleep &hs1_i2s_ws_sleep &hs1_i2s_data0_sleep &hs1_i2s_data1_sleep>; - iommus = <&apps_smmu 0x1B5C 0x0>; - qcom,smmu-s1-bypass; - qcom,iova-mapping = <0x0 0xFFFFFFFF>; bit-clock-hz = <12288000>; data-buffer-ms = <10>; bit-depth = <32>; @@ -89,9 +90,6 @@ pinctrl-1 = <&hs2_i2s_mclk_sleep &hs2_i2s_sck_sleep &hs2_i2s_ws_sleep &hs2_i2s_data0_sleep &hs2_i2s_data1_sleep>; - iommus = <&apps_smmu 0x1B5D 0x0>; - qcom,smmu-s1-bypass; - qcom,iova-mapping = <0x0 0xFFFFFFFF>; bit-clock-hz = <12288000>; data-buffer-ms = <10>; bit-depth = <32>; @@ -121,9 +119,6 @@ pinctrl-1 = <&hs3_i2s_mclk_sleep &hs3_i2s_sck_sleep &hs3_i2s_ws_sleep &hs3_i2s_data0_sleep &hs3_i2s_data1_sleep>; - iommus = <&apps_smmu 0x1B5E 0x0>; - qcom,smmu-s1-bypass; - qcom,iova-mapping = <0x0 0xFFFFFFFF>; bit-clock-hz = <12288000>; data-buffer-ms = <10>; bit-depth = <32>; diff --git a/arch/arm64/boot/dts/qcom/sdm429.dtsi b/arch/arm64/boot/dts/qcom/sdm429.dtsi index a01665ee76b361ab8c157c0e241b990dbc1c9197..b3b595559f4d37ce3c40441d0fd180ed6a858145 100644 --- a/arch/arm64/boot/dts/qcom/sdm429.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm429.dtsi @@ -111,7 +111,7 @@ alloc-ranges = <0 0x00000000 0 0xffffffff>; reusable; 
alignment = <0 0x400000>; - size = <0 0x1000000>; + size = <0 0x400000>; }; adsp_mem: adsp_region@0 { @@ -512,8 +512,9 @@ sdhc_1: sdhci@7824900 { compatible = "qcom,sdhci-msm"; - reg = <0x7824900 0x500>, <0x7824000 0x800>, <0x7824e00 0x200>; - reg-names = "hc_mem", "core_mem", "cmdq_mem"; + reg = <0x7824900 0x500>, <0x7824000 0x800>, <0x7824e00 0x200>, + <0x7803000 0x8000>; + reg-names = "hc_mem", "core_mem", "cmdq_mem", "cmdq_ice"; interrupts = , ; @@ -901,7 +902,7 @@ qcom,hsusb-otg-phy-init-seq = <0x43 0x80 0x06 0x82 0xffffffff>; qcom,hsusb-otg-phy-type = <3>; /* SNPS Femto PHY */ - qcom,hsusb-otg-mode = <3>; /* OTG mode */ + qcom,hsusb-otg-mode = <1>; /* DEVICE mode */ qcom,hsusb-otg-otg-control = <2>; /* PMIC */ qcom,dp-manual-pullup; qcom,phy-dvdd-always-on; @@ -1307,6 +1308,78 @@ reg = <0x08600720 0x2000>; }; + qcom_crypto: qcrypto@720000 { + compatible = "qcom,qcrypto"; + reg = <0x720000 0x20000>, + <0x704000 0x20000>; + reg-names = "crypto-base","crypto-bam-base"; + interrupts = <0 207 0>; + qcom,bam-pipe-pair = <2>; + qcom,ce-hw-instance = <0>; + qcom,ce-device = <0>; + qcom,ce-hw-shared; + qcom,clk-mgmt-sus-res; + qcom,msm-bus,name = "qcrypto-noc"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <55 512 0 0>, + <55 512 393600 393600>; + clocks = <&gcc CRYPTO_CLK_SRC>, + <&gcc GCC_CRYPTO_CLK>, + <&gcc GCC_CRYPTO_AHB_CLK>, + <&gcc GCC_CRYPTO_AXI_CLK>; + clock-names = "core_clk_src", "core_clk", + "iface_clk", "bus_clk"; + qcom,use-sw-aes-cbc-ecb-ctr-algo; + qcom,use-sw-aes-xts-algo; + qcom,use-sw-aes-ccm-algo; + qcom,use-sw-ahash-algo; + qcom,use-sw-hmac-algo; + qcom,use-sw-aead-algo; + qcom,ce-opp-freq = <100000000>; + }; + + qcom_cedev: qcedev@720000 { + compatible = "qcom,qcedev"; + reg = <0x720000 0x20000>, + <0x704000 0x20000>; + reg-names = "crypto-base","crypto-bam-base"; + interrupts = <0 207 0>; + qcom,bam-pipe-pair = <1>; + qcom,ce-hw-instance = <0>; + qcom,ce-device = <0>; + qcom,ce-hw-shared; + qcom,msm-bus,name = "qcedev-noc"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <55 512 0 0>, + <55 512 393600 393600>; + clocks = <&gcc CRYPTO_CLK_SRC>, + <&gcc GCC_CRYPTO_CLK>, + <&gcc GCC_CRYPTO_AHB_CLK>, + <&gcc GCC_CRYPTO_AXI_CLK>; + clock-names = "core_clk_src", "core_clk", + "iface_clk", "bus_clk"; + qcom,ce-opp-freq = <100000000>; + }; + + qcom_rng: qrng@e3000 { + compatible = "qcom,msm-rng"; + reg = <0xe3000 0x1000>; + qcom,msm-rng-iface-clk; + qcom,no-qrng-config; + qcom,msm-bus,name = "msm-rng-noc"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <1 618 0 0>, /* No vote */ + <1 618 0 800>; /* 100 MB/s */ + clocks = <&gcc GCC_PRNG_AHB_CLK>; + clock-names = "iface_clk"; + }; + qcom,wcnss-wlan@0a000000 { compatible = "qcom,wcnss_wlan"; reg = <0x0a000000 0x280000>, diff --git a/arch/arm64/boot/dts/qcom/sdm660-common.dtsi b/arch/arm64/boot/dts/qcom/sdm660-common.dtsi index c7ca78f584ced4453883b97337b8c5a9d6cdfd0b..994e246ae76777b2a581c692601a94f7815035cc 100644 --- a/arch/arm64/boot/dts/qcom/sdm660-common.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm660-common.dtsi @@ -505,6 +505,10 @@ snps,is-utmi-l1-suspend; snps,hird-threshold = /bits/ 8 <0x0>; dr_mode = "host"; + linux,sysdev_is_parent; + snps,dis_enblslpm_quirk; + snps,dis_u2_susphy_quirk; + usb-core-id = <1>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdmshrike-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdmshrike-gpu.dtsi index 
65b478bb9716d11fbea3829e62b5743c611a253a..9e463e27c423c68dcb9e1902d64289d5a0d9d013 100644 --- a/arch/arm64/boot/dts/qcom/sdmshrike-gpu.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmshrike-gpu.dtsi @@ -263,6 +263,7 @@ qcom,secure_align_mask = <0xfff>; qcom,retention; qcom,hyp_secure_alloc; + qcom,secure-size = <0x20000000>; /* 512MB */ gfx3d_user: gfx3d_user { compatible = "qcom,smmu-kgsl-cb"; diff --git a/arch/arm64/boot/dts/qcom/sdmshrike-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdmshrike-pinctrl.dtsi index 8017279a097d874350a81370a9eda592124fba8f..ac0afa77579a8eb6c461a60377f066cb13f09278 100644 --- a/arch/arm64/boot/dts/qcom/sdmshrike-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmshrike-pinctrl.dtsi @@ -3587,7 +3587,7 @@ }; }; - cnss_pins { + cnss_pins: cnss_pins { cnss_wlan_en_active: cnss_wlan_en_active { mux { pins = "gpio169"; @@ -3743,12 +3743,12 @@ bt_en_active: bt_en_active { mux { - pins = "gpio172"; + pins = "gpio172", "gpio171"; function = "gpio"; }; config { - pins = "gpio172"; + pins = "gpio172", "gpio171"; drive-strength = <2>; bias-pull-down; }; @@ -4059,6 +4059,75 @@ }; }; + qupv3_se4_4uart_pins: qupv3_se4_4uart_pins { + qupv3_se4_default_ctsrtsrx: + qupv3_se4_default_ctsrtsrx { + mux { + pins = "gpio51", "gpio52", "gpio54"; + function = "gpio"; + }; + + config { + pins = "gpio51", "gpio52", "gpio54"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + qupv3_se4_default_tx: qupv3_se4_default_tx { + mux { + pins = "gpio53"; + function = "gpio"; + }; + + config { + pins = "gpio53"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + qupv3_se4_ctsrx: qupv3_se4_ctsrx { + mux { + pins = "gpio51", "gpio54"; + function = "qup4"; + }; + + config { + pins = "gpio51", "gpio54"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se4_rts: qupv3_se4_rts { + mux { + pins = "gpio52"; + function = "qup4"; + }; + + config { + pins = "gpio52"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + qupv3_se4_tx: qupv3_se4_tx { + mux { + pins = "gpio53"; + function = "qup4"; + }; + + config { + pins = "gpio53"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + }; + /* SE 5 pin mappings */ qupv3_se5_i2c_pins: qupv3_se5_i2c_pins { qupv3_se5_i2c_active: qupv3_se5_i2c_active { diff --git a/arch/arm64/boot/dts/qcom/sdmshrike-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdmshrike-qupv3.dtsi index d6eb08d068d18448f526b151bd294409f887a878..216cfd11d763f363f9249fff03a1a2c2c2175ee0 100644 --- a/arch/arm64/boot/dts/qcom/sdmshrike-qupv3.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmshrike-qupv3.dtsi @@ -327,6 +327,29 @@ status = "disabled"; }; + /* 4-wire UART */ + qupv3_se4_4uart: qcom,qup_uart@890000 { + compatible = "qcom,msm-geni-serial-hs"; + reg = <0x890000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "active", "sleep"; + pinctrl-0 = <&qupv3_se4_default_ctsrtsrx>, + <&qupv3_se4_default_tx>; + pinctrl-1 = <&qupv3_se4_ctsrx>, <&qupv3_se4_rts>, + <&qupv3_se4_tx>; + pinctrl-2 = <&qupv3_se4_ctsrx>, <&qupv3_se4_rts>, + <&qupv3_se4_tx>; + interrupts-extended = <&pdc GIC_SPI 605 0>, + <&tlmm 54 0>; + qcom,wrapper-core = <&qupv3_0>; + qcom,wakeup-byte = <0xFD>; + status = "disabled"; + }; + /* QUPv3 East0 and East1 Instances * East1 0 : SE 8 * East1 1 : SE 9 diff --git a/arch/arm64/boot/dts/qcom/sdmshrike-sde.dtsi b/arch/arm64/boot/dts/qcom/sdmshrike-sde.dtsi index 
1068389f58984a9fb3231831c35e859c2ee47dec..17ccef539b5ee69bcc6b3d569676dfebab0373ea 100644 --- a/arch/arm64/boot/dts/qcom/sdmshrike-sde.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmshrike-sde.dtsi @@ -102,7 +102,8 @@ qcom,sde-cdm-off = <0x7a200>; qcom,sde-cdm-size = <0x224>; - qcom,sde-dsc-off = <0x81000 0x81400 0x81800 0x81c00>; + qcom,sde-dsc-off = <0x81000 0x81400 0x81800 0x81c00 + 0x82000 0x82400>; qcom,sde-dsc-size = <0x140>; qcom,sde-dither-off = <0x30e0 0x30e0 0x30e0 diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-cdp-256.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-cdp-256.dtsi index c2fdaebf1bb7ffa561909626145fa1259f23038a..96fc4b7855ebf760463178473075299c088e7f7c 100644 --- a/arch/arm64/boot/dts/qcom/sdxprairie-cdp-256.dtsi +++ b/arch/arm64/boot/dts/qcom/sdxprairie-cdp-256.dtsi @@ -144,6 +144,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_XO_THERM_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; pa-therm1-usr { @@ -152,6 +160,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_AMUX_THM1_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; pa-therm2-usr { @@ -160,6 +176,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_AMUX_THM2_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; sdx-case-therm-usr { @@ -168,6 +192,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_AMUX_THM3_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; ambient-therm-usr { @@ -176,6 +208,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_GPIO1_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-cdp.dtsi index 5ae2b2fc5d801fbdef7a581a60ad720a15823800..8bc9c76aef51e0a533450ca506e1f8ea22727c59 100644 --- a/arch/arm64/boot/dts/qcom/sdxprairie-cdp.dtsi +++ b/arch/arm64/boot/dts/qcom/sdxprairie-cdp.dtsi @@ -144,6 +144,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_XO_THERM_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; pa-therm1-usr { @@ -152,6 +160,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_AMUX_THM1_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; pa-therm2-usr { @@ -160,6 +176,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_AMUX_THM2_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; sdx-case-therm-usr { @@ -168,6 +192,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_AMUX_THM3_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; ambient-therm-usr { @@ -176,6 +208,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio 
ADC_GPIO1_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-mtp-256.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-mtp-256.dtsi index f670392012d775cd3a1a46b8f6fa7d223ae9eb63..04761ea44366cde48a7439671f162fc9106a2bf9 100644 --- a/arch/arm64/boot/dts/qcom/sdxprairie-mtp-256.dtsi +++ b/arch/arm64/boot/dts/qcom/sdxprairie-mtp-256.dtsi @@ -144,6 +144,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_XO_THERM_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; pa-therm1-usr { @@ -152,6 +160,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_AMUX_THM1_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; pa-therm2-usr { @@ -160,6 +176,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_AMUX_THM2_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; sdx-case-therm-usr { @@ -168,6 +192,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_AMUX_THM3_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; ambient-therm-usr { @@ -176,6 +208,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_GPIO1_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-mtp.dtsi index ebc21f77b4b3ae917379d3db2c8e5b3be69ccafd..f6400234107c92467e25b05d91e8bdb5e0f0e8a7 100644 --- a/arch/arm64/boot/dts/qcom/sdxprairie-mtp.dtsi +++ b/arch/arm64/boot/dts/qcom/sdxprairie-mtp.dtsi @@ -144,6 +144,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_XO_THERM_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; pa-therm1-usr { @@ -152,6 +160,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_AMUX_THM1_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; pa-therm2-usr { @@ -160,6 +176,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_AMUX_THM2_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; sdx-case-therm-usr { @@ -168,6 +192,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_AMUX_THM3_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; ambient-therm-usr { @@ -176,6 +208,14 @@ thermal-sensors = <&pmxprairie_adc_tm_iio ADC_GPIO1_PU2>; thermal-governor = "user_space"; wake-capable-sensor; + + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-pinctrl.dtsi 
b/arch/arm64/boot/dts/qcom/sdxprairie-pinctrl.dtsi index edba1fd6cdbb397160ef58b33abcec738df82986..9d8697f76452a6aa0ea5a044c1f18ec0aef8df29 100644 --- a/arch/arm64/boot/dts/qcom/sdxprairie-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sdxprairie-pinctrl.dtsi @@ -1685,5 +1685,20 @@ bias-pull-down; }; }; + sja1105_pin { + sja1105_default: sja1105_default { + mux { + pins = "gpio89", "gpio91", "gpio102"; + function = "gpio"; + }; + + config { + pins = "gpio89", "gpio91", "gpio102"; + drive-strength = <4>; + bias-pull-up; + output-high; + }; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-thermal-common.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-thermal-common.dtsi index 0ad7d91a16592f9b89d0de9e328b934d0b90bb1b..980d23fe4b10c0033a0350a65c59b4af10d35a65 100644 --- a/arch/arm64/boot/dts/qcom/sdxprairie-thermal-common.dtsi +++ b/arch/arm64/boot/dts/qcom/sdxprairie-thermal-common.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -128,7 +128,12 @@ "qfe_wtr_pa3", "sys_therm1", "sys_therm2", - "modem_tsens1"; + "modem_tsens1", + "qfe_ret_pa0_fr1", + "qfe_wtr_pa0_fr1", + "qfe_wtr_pa1_fr1", + "qfe_wtr_pa2_fr1", + "qfe_wtr_pa3_fr1"; }; }; }; @@ -389,4 +394,164 @@ }; }; }; + + modem-ret-0-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&qmi_sensor + (QMI_MODEM_NR_INST_ID+QMI_QFE_RET_PA_0)>; + wake-capable-sensor; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + modem-wtr-0-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&qmi_sensor + (QMI_MODEM_NR_INST_ID+QMI_QFE_WTR_PA_0)>; + wake-capable-sensor; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + modem-wtr-1-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&qmi_sensor + (QMI_MODEM_NR_INST_ID+QMI_QFE_WTR_PA_1)>; + wake-capable-sensor; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + modem-wtr-2-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&qmi_sensor + (QMI_MODEM_NR_INST_ID+QMI_QFE_WTR_PA_2)>; + wake-capable-sensor; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + modem-wtr-3-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&qmi_sensor + (QMI_MODEM_NR_INST_ID+QMI_QFE_WTR_PA_3)>; + wake-capable-sensor; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + modem-ret-0-fr1-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&qmi_sensor + (QMI_MODEM_NR_INST_ID+QMI_QFE_RET_PA_0_FR1)>; + wake-capable-sensor; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + modem-wtr-0-fr1-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&qmi_sensor + 
(QMI_MODEM_NR_INST_ID+QMI_QFE_WTR_PA_0_FR1)>; + wake-capable-sensor; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + modem-wtr-1-fr1-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&qmi_sensor + (QMI_MODEM_NR_INST_ID+QMI_QFE_WTR_PA_1_FR1)>; + wake-capable-sensor; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + modem-wtr-2-fr1-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&qmi_sensor + (QMI_MODEM_NR_INST_ID+QMI_QFE_WTR_PA_2_FR1)>; + wake-capable-sensor; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + modem-wtr-3-fr1-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&qmi_sensor + (QMI_MODEM_NR_INST_ID+QMI_QFE_WTR_PA_3_FR1)>; + wake-capable-sensor; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-marmot.dtsi b/arch/arm64/boot/dts/qcom/sm8150-marmot.dtsi index 41455dacc67722d64e82cc4933372e69ca8b2915..f7dac35eea9add5e238914720868e9747e2ef93d 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-marmot.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-marmot.dtsi @@ -171,7 +171,7 @@ mhi_event@7 { mhi,num-elements = <2048>; mhi,intmod = <5>; - mhi,msi = <6>; + mhi,msi = <5>; mhi,chan = <101>; mhi,priority = <1>; mhi,brstmode = <3>; @@ -182,7 +182,7 @@ mhi_event@8 { mhi,num-elements = <0>; mhi,intmod = <0>; - mhi,msi = <7>; + mhi,msi = <6>; mhi,chan = <102>; mhi,priority = <1>; mhi,brstmode = <3>; @@ -194,7 +194,7 @@ mhi_event@9 { mhi,num-elements = <1024>; mhi,intmod = <5>; - mhi,msi = <8>; + mhi,msi = <7>; mhi,chan = <103>; mhi,priority = <1>; mhi,brstmode = <2>; @@ -204,7 +204,7 @@ mhi_event@10 { mhi,num-elements = <0>; mhi,intmod = <0>; - mhi,msi = <9>; + mhi,msi = <8>; mhi,chan = <105>; mhi,priority = <1>; mhi,brstmode = <3>; @@ -216,7 +216,7 @@ mhi_event@11 { mhi,num-elements = <0>; mhi,intmod = <0>; - mhi,msi = <10>; + mhi,msi = <9>; mhi,chan = <106>; mhi,priority = <1>; mhi,brstmode = <3>; @@ -228,7 +228,7 @@ mhi_event@12 { mhi,num-elements = <0>; mhi,intmod = <0>; - mhi,msi = <11>; + mhi,msi = <10>; mhi,chan = <107>; mhi,priority = <1>; mhi,brstmode = <3>; @@ -240,7 +240,7 @@ mhi_event@13 { mhi,num-elements = <0>; mhi,intmod = <0>; - mhi,msi = <12>; + mhi,msi = <11>; mhi,chan = <108>; mhi,priority = <1>; mhi,brstmode = <3>; @@ -252,7 +252,7 @@ mhi_event@14 { mhi,num-elements = <1024>; mhi,intmod = <1>; - mhi,msi = <13>; + mhi,msi = <12>; mhi,chan = <109>; mhi,priority = <0>; mhi,brstmode = <2>; @@ -262,7 +262,7 @@ mhi_event@15 { mhi,num-elements = <1024>; mhi,intmod = <0>; - mhi,msi = <14>; + mhi,msi = <13>; mhi,chan = <110>; mhi,priority = <0>; mhi,brstmode = <2>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi index bb06cbca433e09360b62bcbc007bb1e76a381a6e..7f1c4390a36b7dabbfc05af8207147ee3a218bc3 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi @@ -925,6 +925,75 @@ }; }; + qupv3_se4_4uart_pins: qupv3_se4_4uart_pins { + qupv3_se4_default_ctsrtsrx: + qupv3_se4_default_ctsrtsrx { + mux { + pins = "gpio51", "gpio52", "gpio54"; + function = "gpio"; + }; + + config { + pins = 
"gpio51", "gpio52", "gpio54"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + qupv3_se4_default_tx: qupv3_se4_default_tx { + mux { + pins = "gpio53"; + function = "gpio"; + }; + + config { + pins = "gpio53"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + qupv3_se4_ctsrx: qupv3_se4_ctsrx { + mux { + pins = "gpio51", "gpio54"; + function = "qup4"; + }; + + config { + pins = "gpio51", "gpio54"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se4_rts: qupv3_se4_rts { + mux { + pins = "gpio52"; + function = "qup4"; + }; + + config { + pins = "gpio52"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + qupv3_se4_tx: qupv3_se4_tx { + mux { + pins = "gpio53"; + function = "qup4"; + }; + + config { + pins = "gpio53"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + }; + qupv3_se4_2uart_pins: qupv3_se4_2uart_pins { qupv3_se4_2uart_default: qupv3_se4_2uart_default { mux { diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts index 8e6a6543175673d6fd9e3d7e5d370b29ac7a2c50..e67d1c436889979deb6f5f689b3e2bab275148d2 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts @@ -91,7 +91,7 @@ &i2c1 { status = "okay"; - rk805: rk805@18 { + rk805: pmic@18 { compatible = "rockchip,rk805"; reg = <0x18>; interrupt-parent = <&gpio2>; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index 3f8f528099a8099f12c080c5e694d4fd9aa2f044..cae9ca74ac855f334dba2450b43fe81be7cb8024 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts @@ -149,7 +149,7 @@ &i2c1 { status = "okay"; - rk805: rk805@18 { + rk805: pmic@18 { compatible = "rockchip,rk805"; reg = <0x18>; interrupt-parent = <&gpio2>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index ab7629c5b856d7a6ed2ac8e95600262e098a01d6..b63d9653ff559cbd5a7e75424e41c0a96ce52288 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi @@ -402,7 +402,7 @@ "bus_clk", "grf_clk"; status = "disabled"; - usbdrd_dwc3_0: dwc3 { + usbdrd_dwc3_0: usb@fe800000 { compatible = "snps,dwc3"; reg = <0x0 0xfe800000 0x0 0x100000>; interrupts = ; @@ -430,7 +430,7 @@ "bus_clk", "grf_clk"; status = "disabled"; - usbdrd_dwc3_1: dwc3 { + usbdrd_dwc3_1: usb@fe900000 { compatible = "snps,dwc3"; reg = <0x0 0xfe900000 0x0 0x100000>; interrupts = ; @@ -1691,10 +1691,10 @@ gpu: gpu@ff9a0000 { compatible = "rockchip,rk3399-mali", "arm,mali-t860"; reg = <0x0 0xff9a0000 0x0 0x10000>; - interrupts = , - , - ; - interrupt-names = "gpu", "job", "mmu"; + interrupts = , + , + ; + interrupt-names = "job", "mmu", "gpu"; clocks = <&cru ACLK_GPU>; power-domains = <&power RK3399_PD_GPU>; status = "disabled"; diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig index 56e6526205416f893a2ad7afa7504e0ecf1c7b88..b753e71e4bd8961dbaea925e4e17c626b952f1e2 100644 --- a/arch/arm64/configs/cuttlefish_defconfig +++ b/arch/arm64/configs/cuttlefish_defconfig @@ -18,7 +18,6 @@ CONFIG_CPUSETS=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_BPF=y CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_SCHED_AUTOGROUP=y CONFIG_SCHED_TUNE=y @@ -96,7 +95,9 @@ CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_NET_IPIP=y CONFIG_NET_IPGRE_DEMUX=y +CONFIG_NET_IPGRE=y CONFIG_NET_IPVTI=y 
CONFIG_INET_ESP=y # CONFIG_INET_XFRM_MODE_BEET is not set @@ -113,6 +114,7 @@ CONFIG_INET6_ESP=y CONFIG_INET6_IPCOMP=y CONFIG_IPV6_MIP6=y CONFIG_IPV6_VTI=y +CONFIG_IPV6_GRE=y CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_NETFILTER=y CONFIG_NF_CONNTRACK=y @@ -236,6 +238,7 @@ CONFIG_DUMMY=y CONFIG_NETCONSOLE=y CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_TUN=y +CONFIG_VETH=y CONFIG_VIRTIO_NET=y # CONFIG_ETHERNET is not set CONFIG_PHYLIB=y @@ -401,6 +404,7 @@ CONFIG_MMC=y # CONFIG_PWRSEQ_EMMC is not set # CONFIG_PWRSEQ_SIMPLE is not set # CONFIG_MMC_BLOCK is not set +CONFIG_MMC_CRYPTO=y CONFIG_RTC_CLASS=y # CONFIG_RTC_SYSTOHC is not set CONFIG_RTC_DRV_PL030=y @@ -473,6 +477,7 @@ CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_LSM_MMAP_MIN_ADDR=65536 CONFIG_HARDENED_USERCOPY=y +CONFIG_STATIC_USERMODEHELPER=y CONFIG_SECURITY_SELINUX=y CONFIG_INIT_STACK_ALL=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y diff --git a/arch/arm64/configs/vendor/gen3auto-capture_defconfig b/arch/arm64/configs/vendor/gen3auto-capture_defconfig index 51d891f5a0ebf594251887fbbc90cda3c5d6fc5a..a7090ced2af3b2921079760d911191f42501c4d8 100644 --- a/arch/arm64/configs/vendor/gen3auto-capture_defconfig +++ b/arch/arm64/configs/vendor/gen3auto-capture_defconfig @@ -60,6 +60,7 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_ARCH_QCOM=y CONFIG_ARCH_SM8150=y +CONFIG_ARCH_SDMSHRIKE=y CONFIG_PCI=y CONFIG_PCI_MSM=y CONFIG_PCI_MSM_MSI=y @@ -275,7 +276,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_BLK_DEV_NVME=y CONFIG_NVME_QCOM=y -CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y CONFIG_UID_SYS_STATS=y CONFIG_MEMORY_STATE_TIME=y @@ -569,6 +569,7 @@ CONFIG_QCOM_CPUSS_DUMP=y CONFIG_QCOM_RUN_QUEUE_STATS=y CONFIG_QCOM_LLCC=y CONFIG_QCOM_SM8150_LLCC=y +CONFIG_QCOM_SDMSHRIKE_LLCC=y CONFIG_QCOM_QMI_HELPERS=y CONFIG_QCOM_SMEM=y CONFIG_QCOM_MEMORY_DUMP_V2=y diff --git a/arch/arm64/configs/vendor/gen3auto-perf_defconfig b/arch/arm64/configs/vendor/gen3auto-perf_defconfig index b771e6f7906b69c3651aa2f0a94be53266197efd..2b6d31b62d1151e01b48512eb7e6fac45448766b 100644 --- a/arch/arm64/configs/vendor/gen3auto-perf_defconfig +++ b/arch/arm64/configs/vendor/gen3auto-perf_defconfig @@ -267,7 +267,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_BLK_DEV_NVME=y CONFIG_NVME_QCOM=y -CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y CONFIG_PROFILER=m CONFIG_UID_SYS_STATS=y diff --git a/arch/arm64/configs/vendor/gen3auto_defconfig b/arch/arm64/configs/vendor/gen3auto_defconfig index 9edf25b4dcdf8c59a0d56c5f4521f80964f7353e..e5ceb314d7c3924f85678d60a8f284e1f9c4c5cd 100644 --- a/arch/arm64/configs/vendor/gen3auto_defconfig +++ b/arch/arm64/configs/vendor/gen3auto_defconfig @@ -278,7 +278,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_BLK_DEV_NVME=y CONFIG_NVME_QCOM=y -CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y CONFIG_PROFILER=m CONFIG_UID_SYS_STATS=y diff --git a/arch/arm64/configs/vendor/qcs405-perf_defconfig b/arch/arm64/configs/vendor/qcs405-perf_defconfig index 433dcaf105f1cd7474d96cc91c08d43f7c8e6e51..cea1b6be71110c72064149c4ce1c3d2589f0009c 100644 --- a/arch/arm64/configs/vendor/qcs405-perf_defconfig +++ b/arch/arm64/configs/vendor/qcs405-perf_defconfig @@ -270,6 +270,7 @@ CONFIG_CLD_LL_CORE=y CONFIG_CNSS=y CONFIG_CNSS_SDIO=y CONFIG_CLD_HL_SDIO_CORE=y +CONFIG_CNSS_GENL=y CONFIG_INPUT_EVDEV=y CONFIG_INPUT_EVBUG=m CONFIG_INPUT_KEYRESET=y diff --git a/arch/arm64/configs/vendor/qcs405_defconfig b/arch/arm64/configs/vendor/qcs405_defconfig index 0dac911cb584cede530db3e5f0c513f66d8fdc7f..ad4bfcca98ee3441c169a7463ee6abf8fb99b640 100644 --- 
a/arch/arm64/configs/vendor/qcs405_defconfig +++ b/arch/arm64/configs/vendor/qcs405_defconfig @@ -276,6 +276,7 @@ CONFIG_CLD_LL_CORE=y CONFIG_CNSS=y CONFIG_CNSS_SDIO=y CONFIG_CLD_HL_SDIO_CORE=y +CONFIG_CNSS_GENL=y CONFIG_INPUT_EVDEV=y CONFIG_INPUT_EVBUG=m CONFIG_INPUT_KEYRESET=y diff --git a/arch/arm64/configs/vendor/sa2150p-nand-perf_defconfig b/arch/arm64/configs/vendor/sa2150p-nand-perf_defconfig index d78c6d646281f007bf0c3ed38c30b8527cbf3d00..541675328ea6ff15e4739af59c6f4ebd761c69d2 100644 --- a/arch/arm64/configs/vendor/sa2150p-nand-perf_defconfig +++ b/arch/arm64/configs/vendor/sa2150p-nand-perf_defconfig @@ -429,7 +429,6 @@ CONFIG_IIO=y CONFIG_QCOM_SPMI_ADC5=y CONFIG_PWM=y CONFIG_PWM_QTI_LPG=y -CONFIG_QCOM_KGSL=y CONFIG_QTI_MPM=y CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y diff --git a/arch/arm64/configs/vendor/sa2150p-nand_defconfig b/arch/arm64/configs/vendor/sa2150p-nand_defconfig index 6194d3f50c3ffb71471311fe2768eec9b6c8391f..637543955dca79a7242a7b42ec10b63a81d700f2 100644 --- a/arch/arm64/configs/vendor/sa2150p-nand_defconfig +++ b/arch/arm64/configs/vendor/sa2150p-nand_defconfig @@ -430,7 +430,6 @@ CONFIG_IIO=y CONFIG_QCOM_SPMI_ADC5=y CONFIG_PWM=y CONFIG_PWM_QTI_LPG=y -CONFIG_QCOM_KGSL=y CONFIG_QTI_MPM=y CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y diff --git a/arch/arm64/configs/vendor/sa8155-perf_defconfig b/arch/arm64/configs/vendor/sa8155-perf_defconfig index 577e79272295dfc76230e12cc2f497f0ba285589..8d8ce26c881b8682512d72804b7ad769bdac032d 100644 --- a/arch/arm64/configs/vendor/sa8155-perf_defconfig +++ b/arch/arm64/configs/vendor/sa8155-perf_defconfig @@ -68,7 +68,6 @@ CONFIG_HZ_100=y CONFIG_CMA=y CONFIG_ZSMALLOC=y CONFIG_SECCOMP=y -# CONFIG_HARDEN_BRANCH_PREDICTOR is not set CONFIG_ARMV8_DEPRECATED=y CONFIG_SWP_EMULATION=y CONFIG_CP15_BARRIER_EMULATION=y @@ -267,7 +266,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_BLK_DEV_NVME=y CONFIG_NVME_QCOM=y -CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y CONFIG_PROFILER=m CONFIG_UID_SYS_STATS=y @@ -617,6 +615,7 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y +CONFIG_FS_VERITY=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -643,6 +642,7 @@ CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_FORTIFY_SOURCE=y +CONFIG_STATIC_USERMODEHELPER=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y CONFIG_CRYPTO_GCM=y diff --git a/arch/arm64/configs/vendor/sa8155_defconfig b/arch/arm64/configs/vendor/sa8155_defconfig index e57b7e9239c76bf0215de9fb256177d3f409ab39..4b2b5fdbebb466f77f61d15aa88fac1d9d7ee025 100644 --- a/arch/arm64/configs/vendor/sa8155_defconfig +++ b/arch/arm64/configs/vendor/sa8155_defconfig @@ -74,7 +74,6 @@ CONFIG_CMA_DEBUGFS=y CONFIG_ZSMALLOC=y CONFIG_SECCOMP=y CONFIG_KEXEC=y -# CONFIG_HARDEN_BRANCH_PREDICTOR is not set CONFIG_PRINT_VMEMLAYOUT=y CONFIG_ARMV8_DEPRECATED=y CONFIG_SWP_EMULATION=y @@ -278,7 +277,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_BLK_DEV_NVME=y CONFIG_NVME_QCOM=y -CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y CONFIG_PROFILER=m CONFIG_UID_SYS_STATS=y @@ -651,6 +649,7 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y +CONFIG_FS_VERITY=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y @@ -742,6 +741,7 @@ CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_HARDENED_USERCOPY_PAGESPAN=y CONFIG_FORTIFY_SOURCE=y +CONFIG_STATIC_USERMODEHELPER=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y 
CONFIG_CRYPTO_GCM=y diff --git a/arch/arm64/configs/vendor/sdmshrike-perf_defconfig b/arch/arm64/configs/vendor/sdmshrike-perf_defconfig index 42000079612e72f128a8b816e3c259546a2449a3..500d8b14d4cd44b181be94b1b6e26fe2c82a88ed 100644 --- a/arch/arm64/configs/vendor/sdmshrike-perf_defconfig +++ b/arch/arm64/configs/vendor/sdmshrike-perf_defconfig @@ -262,7 +262,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_BLK_DEV_NVME=y CONFIG_NVME_QCOM=y -CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y CONFIG_PROFILER=m CONFIG_UID_SYS_STATS=y diff --git a/arch/arm64/configs/vendor/sdmshrike_defconfig b/arch/arm64/configs/vendor/sdmshrike_defconfig index 4d96999f73238025a51017303fa941361034d4d1..cdbc1d2c48f684d18f8c7c4190ade3dbc2c7b9fc 100644 --- a/arch/arm64/configs/vendor/sdmshrike_defconfig +++ b/arch/arm64/configs/vendor/sdmshrike_defconfig @@ -273,7 +273,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_BLK_DEV_NVME=y CONFIG_NVME_QCOM=y -CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y CONFIG_PROFILER=m CONFIG_UID_SYS_STATS=y diff --git a/arch/arm64/configs/vendor/sdmsteppe-auto-perf_defconfig b/arch/arm64/configs/vendor/sdmsteppe-auto-perf_defconfig index 8b2552ecae59aa41da0359f414c2ad4d1ff26df1..4ae3b693dabc8fb0344fc699c554a003702968b1 100644 --- a/arch/arm64/configs/vendor/sdmsteppe-auto-perf_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe-auto-perf_defconfig @@ -268,7 +268,6 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y CONFIG_PROFILER=m CONFIG_UID_SYS_STATS=y diff --git a/arch/arm64/configs/vendor/sdmsteppe-auto_defconfig b/arch/arm64/configs/vendor/sdmsteppe-auto_defconfig index b1c0eaa34affa60fd486a679f4199504e65ae29a..e9a17512ab20dfd172bf33af77245a861b87ec0d 100644 --- a/arch/arm64/configs/vendor/sdmsteppe-auto_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe-auto_defconfig @@ -278,7 +278,6 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y CONFIG_PROFILER=m CONFIG_UID_SYS_STATS=y diff --git a/arch/arm64/configs/vendor/sm8150-perf_defconfig b/arch/arm64/configs/vendor/sm8150-perf_defconfig index e33a5b83e5e00e6c545db41ba62d2534d7df8299..b09131f55f949a1c886507e6ab41e2d7bb955727 100644 --- a/arch/arm64/configs/vendor/sm8150-perf_defconfig +++ b/arch/arm64/configs/vendor/sm8150-perf_defconfig @@ -726,6 +726,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/sm8150_defconfig b/arch/arm64/configs/vendor/sm8150_defconfig index 87f0133ad2f8099a9267d6b0e3f7b238fbde2aae..b29c7fa37aa847ce6048bbcf39b1e9f1e35f2047 100644 --- a/arch/arm64/configs/vendor/sm8150_defconfig +++ b/arch/arm64/configs/vendor/sm8150_defconfig @@ -811,6 +811,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/trinket-perf_defconfig b/arch/arm64/configs/vendor/trinket-perf_defconfig index f464b3d2e72de0544454aa2ebf44462c9d2003c2..bf10f7e5461e9ba93fdc6f180ad8376ab669b9b6 100644 --- a/arch/arm64/configs/vendor/trinket-perf_defconfig +++ b/arch/arm64/configs/vendor/trinket-perf_defconfig @@ 
-704,6 +704,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/configs/vendor/trinket_defconfig b/arch/arm64/configs/vendor/trinket_defconfig index f7944948aecd0f28452b43adc7efd3ff926f2f5d..456b1e7520bee11b8dae0689ff618a6371c88218 100644 --- a/arch/arm64/configs/vendor/trinket_defconfig +++ b/arch/arm64/configs/vendor/trinket_defconfig @@ -790,6 +790,7 @@ CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 790d02358f70053f250addab83ac7bcbb8dfd534..6bd2ab9746f8f6860865c3241c2c028004d82cc2 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -120,6 +120,8 @@ void disable_debug_monitors(enum dbg_active_el el); void user_rewind_single_step(struct task_struct *task); void user_fastforward_single_step(struct task_struct *task); +void user_regs_reset_single_step(struct user_pt_regs *regs, + struct task_struct *task); void kernel_enable_single_step(struct pt_regs *regs); void kernel_disable_single_step(void); diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f982c9d1d10bbf55978c61402ce7aa5604bf44b1..87615facf9596162c07fc81886009ac2f20b0c71 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -292,8 +292,10 @@ struct kvm_vcpu_arch { * CP14 and CP15 live in the same array, as they are backed by the * same system registers. 
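Aside on the vcpu_cp14()/vcpu_cp15() hunk below: the 32-bit copro[] view aliases 64-bit system-register storage, and on a big-endian host the architecturally low 32-bit half of each 64-bit slot sits at the odd 32-bit index, which is the offset the new CPx_BIAS XOR folds in. A stand-alone C sketch of that layout difference (illustrative only, not kernel code):

#include <stdio.h>
#include <stdint.h>

union reg {
        uint64_t wide;
        uint32_t half[2];
};

int main(void)
{
        union reg r = { .wide = 0x1122334455667788ULL };
        /* The architecturally low 32 bits of r.wide are 0x55667788. */
        int low_idx = (r.half[0] == 0x55667788u) ? 0 : 1;

        /* Prints 0 on a little-endian host and 1 on a big-endian one,
         * the same offset the patch folds in with "(r) ^ CPx_BIAS". */
        printf("low 32-bit half lives at half[%d]\n", low_idx);
        return 0;
}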
*/ -#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)]) -#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)]) +#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) + +#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) +#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) #ifdef CONFIG_CPU_BIG_ENDIAN #define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r)) diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 26efe251f0763a3a7ac824437f11fb996f80f6d6..6415677ffe8d55164a2f19a37d84932d540b65ef 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -65,7 +65,7 @@ #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) #define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) -#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) +#define PAGE_HYP_DEVICE __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN) #define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) #define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 77b36c88eee5823c8899427fec2ebb625ad49b4c..a0a939278f31a07a25ef7542e61d89b1136fc88b 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -44,20 +44,8 @@ struct alt_region { */ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) { - unsigned long replptr; - - if (kernel_text_address(pc)) - return 1; - - replptr = (unsigned long)ALT_REPL_PTR(alt); - if (pc >= replptr && pc <= (replptr + alt->alt_len)) - return 0; - - /* - * Branching into *another* alternate sequence is doomed, and - * we're not even trying to fix it up. - */ - BUG(); + unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt); + return !(pc >= replptr && pc <= (replptr + alt->alt_len)); } #define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1)) diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index edb63bf2ac1ca3eae6088613a12e2d02c8e28c3b..2ccd0a99d8b35252bab9829504cf0dfa011f76e7 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -150,17 +150,20 @@ postcore_initcall(debug_monitors_init); /* * Single step API and exception handling. */ -static void set_regs_spsr_ss(struct pt_regs *regs) +static void set_user_regs_spsr_ss(struct user_pt_regs *regs) { regs->pstate |= DBG_SPSR_SS; } -NOKPROBE_SYMBOL(set_regs_spsr_ss); +NOKPROBE_SYMBOL(set_user_regs_spsr_ss); -static void clear_regs_spsr_ss(struct pt_regs *regs) +static void clear_user_regs_spsr_ss(struct user_pt_regs *regs) { regs->pstate &= ~DBG_SPSR_SS; } -NOKPROBE_SYMBOL(clear_regs_spsr_ss); +NOKPROBE_SYMBOL(clear_user_regs_spsr_ss); + +#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs) +#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs) /* EL1 Single Step Handler hooks */ static LIST_HEAD(step_hook); @@ -386,17 +389,26 @@ void user_rewind_single_step(struct task_struct *task) * If single step is active for this thread, then set SPSR.SS * to 1 to avoid returning to the active-pending state. 
*/ - if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) set_regs_spsr_ss(task_pt_regs(task)); } NOKPROBE_SYMBOL(user_rewind_single_step); void user_fastforward_single_step(struct task_struct *task) { - if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) clear_regs_spsr_ss(task_pt_regs(task)); } +void user_regs_reset_single_step(struct user_pt_regs *regs, + struct task_struct *task) +{ + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) + set_user_regs_spsr_ss(regs); + else + clear_user_regs_spsr_ss(regs); +} + /* Kernel API */ void kernel_enable_single_step(struct pt_regs *regs) { diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index f9faa53f12ea5113ed14a22379ea02921813f148..475d94efda06bbd4742f6cfe1cc91d2b6ba22c8b 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -738,6 +738,27 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val, return 0; } +static int watchpoint_report(struct perf_event *wp, unsigned long addr, + struct pt_regs *regs) +{ + int step = is_default_overflow_handler(wp); + struct arch_hw_breakpoint *info = counter_arch_bp(wp); + + info->trigger = addr; + + /* + * If we triggered a user watchpoint from a uaccess routine, then + * handle the stepping ourselves since userspace really can't help + * us with this. + */ + if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0) + step = 1; + else + perf_bp_event(wp, regs); + + return step; +} + static int watchpoint_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs) { @@ -747,7 +768,6 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, u64 val; struct perf_event *wp, **slots; struct debug_info *debug_info; - struct arch_hw_breakpoint *info; struct arch_hw_breakpoint_ctrl ctrl; slots = this_cpu_ptr(wp_on_reg); @@ -785,25 +805,13 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, if (dist != 0) continue; - info = counter_arch_bp(wp); - info->trigger = addr; - perf_bp_event(wp, regs); - - /* Do we need to handle the stepping? */ - if (is_default_overflow_handler(wp)) - step = 1; + step = watchpoint_report(wp, addr, regs); } - if (min_dist > 0 && min_dist != -1) { - /* No exact match found. */ - wp = slots[closest_match]; - info = counter_arch_bp(wp); - info->trigger = addr; - perf_bp_event(wp, regs); - /* Do we need to handle the stepping? */ - if (is_default_overflow_handler(wp)) - step = 1; - } + /* No exact match found? 
*/ + if (min_dist > 0 && min_dist != -1) + step = watchpoint_report(slots[closest_match], addr, regs); + rcu_read_unlock(); if (!step) diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index 470afb3a04ca92f06876ebe150516993e2feef40..7fd7a9cd86161fa54e0ca5234a9d76c34b5168ce 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -258,7 +258,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) if (user_mode(regs) || !kgdb_single_step) return DBG_HOOK_ERROR; - kgdb_handle_exception(1, SIGTRAP, 0, regs); + kgdb_handle_exception(0, SIGTRAP, 0, regs); return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_step_brk_fn); diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index bfc4deac14455fcfd15a62e8b9612b666b2b8573..696dab2e4d5e156c66992af7126f8517462d3bf8 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -184,7 +184,8 @@ void machine_kexec(struct kimage *kimage) /* Flush the reboot_code_buffer in preparation for its execution. */ __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size); flush_icache_range((uintptr_t)reboot_code_buffer, - arm64_relocate_new_kernel_size); + (uintptr_t)reboot_code_buffer + + arm64_relocate_new_kernel_size); /* Flush the kimage list and its buffers. */ kexec_list_flush(kimage); diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index 1d091d048d046cc6a5fc66debbba8e21c2c873a6..0819db91ca9471f4650f3cc2cc2b6d0f0d8254d0 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c @@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) return 0; /* - * Compat (i.e. 32 bit) mode: - * - PC has been set in the pt_regs struct in kernel_entry, - * - Handle SP and LR here. + * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but + * we're stuck with it for ABI compatability reasons. + * + * For a 32-bit consumer inspecting a 32-bit task, then it will look at + * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h). + * These correspond directly to a prefix of the registers saved in our + * 'struct pt_regs', with the exception of the PC, so we copy that down + * (x15 corresponds to SP_hyp in the architecture). + * + * So far, so good. + * + * The oddity arises when a 64-bit consumer looks at a 32-bit task and + * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return + * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and + * PC registers would normally live. The initial idea was to allow a + * 64-bit unwinder to unwind a 32-bit task and, although it's not clear + * how well that works in practice, somebody might be relying on it. + * + * At the time we make a sample, we don't know whether the consumer is + * 32-bit or 64-bit, so we have to cater for both possibilities. 
*/ if (compat_user_mode(regs)) { if ((u32)idx == PERF_REG_ARM64_SP) return regs->compat_sp; if ((u32)idx == PERF_REG_ARM64_LR) return regs->compat_lr; + if (idx == 15) + return regs->pc; } if ((u32)idx == PERF_REG_ARM64_SP) diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index e8574b95bda8693a2652ed240ebe8448d19691be..947dbe5be43f1beeba68966acfb60633ed92b849 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1496,8 +1496,8 @@ static int valid_native_regs(struct user_pt_regs *regs) */ int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) { - if (!test_tsk_thread_flag(task, TIF_SINGLESTEP)) - regs->pstate &= ~DBG_SPSR_SS; + /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */ + user_regs_reset_single_step(regs, task); if (is_compat_thread(task_thread_info(task))) return valid_compat_regs(regs); diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index f7620d315bc5a7d208997d8b3d14022d19c88309..cd3cdd4d46b4d532b796a60a6f65e31f43658c0a 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -12,9 +12,8 @@ obj-vdso := gettimeofday.o note.o sigreturn.o targets := $(obj-vdso) vdso.so vdso.so.dbg obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) -ccflags-y := -shared -fno-common -fno-builtin -ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \ - $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) +ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \ + --build-id -n -T ccflags-y += $(DISABLE_LTO) CFLAGS_REMOVE_vgettimeofday.o += $(CC_FLAGS_SCS) @@ -22,10 +21,6 @@ CFLAGS_REMOVE_vgettimeofday.o += $(CC_FLAGS_SCS) # Disable gcov profiling for VDSO code GCOV_PROFILE := n -# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared -# down to collect2, resulting in silent corruption of the vDSO image. -ccflags-y += -Wl,-shared - obj-y += vdso.o extra-y += vdso.lds CPPFLAGS_vdso.lds += -P -C -U$(ARCH) @@ -35,7 +30,7 @@ $(obj)/vdso.o : $(obj)/vdso.so # Link rule for the .so file, .lds has to be first $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) - $(call if_changed,vdsold) + $(call if_changed,ld) # Strip rule for the .so file $(obj)/%.so: OBJCOPYFLAGS := -S @@ -57,8 +52,6 @@ $(obj-vdso): %.o: %.S FORCE $(call if_changed_dep,vdsoas) # Actual build commands -quiet_cmd_vdsold = VDSOL $@ - cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@ quiet_cmd_vdsoas = VDSOA $@ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 796d8c55b2189555848654d26dc30c12f792dbdb..0b6d21167a02518d4518675de74bab32ec8b66b6 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -161,7 +161,7 @@ SECTIONS __alt_instructions_end = .; } .altinstr_replacement : { - KEEP(*(.altinstr_replacement)) + *(.altinstr_replacement) } . = ALIGN(PAGE_SIZE); diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index dea20651a5f167e70f1d38b9728d263a0c78eb58..cb28c12d0a64b91cff92c9268f06d1c2f5344204 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -147,11 +147,15 @@ ENTRY(__kvm_handle_stub_hvc) 1: cmp x0, #HVC_RESET_VECTORS b.ne 1f -reset: + /* - * Reset kvm back to the hyp stub. Do not clobber x0-x4 in - * case we coming via HVC_SOFT_RESTART. + * Set the HVC_RESET_VECTORS return code before entering the common + * path so that we do not clobber x0-x2 in case we are coming via + * HVC_SOFT_RESTART. 
*/ + mov x0, xzr +reset: + /* Reset kvm back to the hyp stub. */ mrs x5, sctlr_el2 ldr x6, =SCTLR_ELx_FLAGS bic x5, x5, x6 // Clear SCTL_M and etc @@ -162,7 +166,6 @@ reset: /* Install stub vectors */ adr_l x5, __hyp_stub_vectors msr vbar_el2, x5 - mov x0, xzr eret 1: /* Bad stub call */ diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c index 3097fa2ca746729d842a5dbb8d98ea2810afb78d..1e428d18d268a923d0bf7837b1c4a1b94e341ff0 100644 --- a/arch/m68k/coldfire/pci.c +++ b/arch/m68k/coldfire/pci.c @@ -316,8 +316,10 @@ static int __init mcf_pci_init(void) /* Keep a virtual mapping to IO/config space active */ iospace = (unsigned long) ioremap(PCI_IO_PA, PCI_IO_SIZE); - if (iospace == 0) + if (iospace == 0) { + pci_free_host_bridge(bridge); return -ENODEV; + } pr_info("Coldfire: PCI IO/config window mapped to 0x%x\n", (u32) iospace); diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h index de1470c4d829bb23c10e036147ef7654c31ab1f5..1149251ea58d26d716f470c8f387993ecac6a98c 100644 --- a/arch/m68k/include/asm/mac_via.h +++ b/arch/m68k/include/asm/mac_via.h @@ -257,6 +257,7 @@ extern int rbv_present,via_alt_mapping; struct irq_desc; +extern void via_l2_flush(int writeback); extern void via_register_interrupts(void); extern void via_irq_enable(int); extern void via_irq_disable(int); diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index 2004b3f72d804a3fa61667d41405fd374b9bf3da..3ea7450c51f2a94768e9ce60cd6a679c3a0c56e2 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -61,7 +61,6 @@ extern void iop_preinit(void); extern void iop_init(void); extern void via_init(void); extern void via_init_clock(irq_handler_t func); -extern void via_flush_cache(void); extern void oss_init(void); extern void psc_init(void); extern void baboon_init(void); @@ -132,21 +131,6 @@ int __init mac_parse_bootinfo(const struct bi_record *record) return unknown; } -/* - * Flip into 24bit mode for an instant - flushes the L2 cache card. We - * have to disable interrupts for this. Our IRQ handlers will crap - * themselves if they take an IRQ in 24bit mode! - */ - -static void mac_cache_card_flush(int writeback) -{ - unsigned long flags; - - local_irq_save(flags); - via_flush_cache(); - local_irq_restore(flags); -} - void __init config_mac(void) { if (!MACH_IS_MAC) @@ -179,9 +163,8 @@ void __init config_mac(void) * not. */ - if (macintosh_config->ident == MAC_MODEL_IICI - || macintosh_config->ident == MAC_MODEL_IIFX) - mach_l2_flush = mac_cache_card_flush; + if (macintosh_config->ident == MAC_MODEL_IICI) + mach_l2_flush = via_l2_flush; } diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index 863806e6775a8ce6a62a55106d84a681bce49f80..6ab6a1d54b378ed2e6eeeecf4c6fc5a4a4e3b6e8 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -300,10 +300,14 @@ void via_debug_dump(void) * the system into 24-bit mode for an instant. */ -void via_flush_cache(void) +void via_l2_flush(int writeback) { + unsigned long flags; + + local_irq_save(flags); via2[gBufB] &= ~VIA2B_vMode32; via2[gBufB] |= VIA2B_vMode32; + local_irq_restore(flags); } /* diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 5977884b008e6d798755dbf98ae5d29fda1b015c..a4a06d173858808755b45a85093e62f8e1cb60eb 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -279,12 +279,23 @@ ifdef CONFIG_64BIT endif endif +# When linking a 32-bit executable the LLVM linker cannot cope with a +# 32-bit load address that has been sign-extended to 64 bits. 
Simply +# remove the upper 32 bits then, as it is safe to do so with other +# linkers. +ifdef CONFIG_64BIT + load-ld = $(load-y) +else + load-ld = $(subst 0xffffffff,0x,$(load-y)) +endif + KBUILD_AFLAGS += $(cflags-y) KBUILD_CFLAGS += $(cflags-y) -KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) +KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) -DLINKER_LOAD_ADDRESS=$(load-ld) KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0) bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ + LINKER_LOAD_ADDRESS=$(load-ld) \ VMLINUX_ENTRY_ADDRESS=$(entry-y) \ PLATFORM="$(platform-y)" \ ITS_INPUTS="$(its-y)" diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index baa34e4deb78014394aa7edab2952ccc00e7d428..516e593a8ee9d64bcbf13914a5eb5b8004f78344 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -87,7 +87,7 @@ ifneq ($(zload-y),) VMLINUZ_LOAD_ADDRESS := $(zload-y) else VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ - $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS)) + $(obj)/vmlinux.bin $(LINKER_LOAD_ADDRESS)) endif UIMAGE_LOADADDR = $(VMLINUZ_LOAD_ADDRESS) diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig index 324dfee23dfb24436382fe2f46c40a83095b4c20..c871e40b8878b3f5f22a86a6023a6bf50d71ac4d 100644 --- a/arch/mips/configs/loongson3_defconfig +++ b/arch/mips/configs/loongson3_defconfig @@ -250,7 +250,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_MEDIA_USB_SUPPORT=y CONFIG_USB_VIDEO_CLASS=m CONFIG_DRM=y -CONFIG_DRM_RADEON=y +CONFIG_DRM_RADEON=m CONFIG_FB_RADEON=y CONFIG_LCD_CLASS_DEVICE=y CONFIG_LCD_PLATFORM=m diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 673049bf29b634eff4cddc42934d6071b843f0ad..f21dd4cb33ad4fbdf011feabd6f81b2644222ea0 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -274,8 +274,12 @@ enum emulation_result { #define MIPS3_PG_SHIFT 6 #define MIPS3_PG_FRAME 0x3fffffc0 +#if defined(CONFIG_64BIT) +#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13) +#else #define VPN2_MASK 0xffffe000 -#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID +#endif +#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data) #define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G) #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) #define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID) diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index a6810923b3f0214dfa36eab4a28320a65bba7790..a7f9acb4203474421ed8f7ada0ae228f5091b307 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -737,7 +737,7 @@ /* MAAR bit definitions */ #define MIPS_MAAR_VH (_U64CAST_(1) << 63) -#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12) +#define MIPS_MAAR_ADDR GENMASK_ULL(55, 12) #define MIPS_MAAR_ADDR_SHIFT 12 #define MIPS_MAAR_S (_ULCAST_(1) << 1) #define MIPS_MAAR_VL (_ULCAST_(1) << 0) diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 37b9383eacd3e84872b1d2d7d1361cf1b637522f..cf74a963839f4c7e9c88da033e70249e4ec40a07 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -431,20 +431,20 @@ NESTED(nmi_handler, PT_SIZE, sp) .endm .macro __build_clear_fpe + CLI + TRACE_IRQS_OFF .set push /* gas fails to assemble cfc1 for some archs (octeon).*/ \ .set mips1 SET_HARDFLOAT cfc1 a1, fcr31 .set pop - CLI - TRACE_IRQS_OFF .endm .macro __build_clear_msa_fpe - _cfcmsa a1, MSA_CSR CLI TRACE_IRQS_OFF + _cfcmsa a1, MSA_CSR .endm 
.macro __build_clear_ade diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index 7f3f136572decc82d666d0fca0515f38d15dd2b3..50d3d74001cbecac8b6fbcd9cfe4e6691d862323 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -123,9 +123,9 @@ static char *cm2_causes[32] = { "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07", "0x08", "0x09", "0x0a", "0x0b", "0x0c", "0x0d", "0x0e", "0x0f", - "0x10", "0x11", "0x12", "0x13", - "0x14", "0x15", "0x16", "INTVN_WR_ERR", - "INTVN_RD_ERR", "0x19", "0x1a", "0x1b", + "0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13", + "0x14", "0x15", "0x16", "0x17", + "0x18", "0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f" }; diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 05ed4ed411c733ad27310a0a2acb4da1df080f2d..abd7ee9e90ab0dd0d01d970c84d90a2c4edf609b 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -911,7 +911,17 @@ static void __init arch_mem_init(char **cmdline_p) BOOTMEM_DEFAULT); #endif device_tree_init(); + + /* + * In order to reduce the possibility of kernel panic when failed to + * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate + * low memory as small as possible before plat_swiotlb_setup(), so + * make sparse_init() using top-down allocation. + */ + memblock_set_bottom_up(false); sparse_init(); + memblock_set_bottom_up(true); + plat_swiotlb_setup(); dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index a6ebc8135112e5c9c22e5598bf25556469f9dbb4..79ebf349aab4abe3118705fde985caaeae75b59a 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -22,12 +22,77 @@ #include #include #include +#include +#include #include #include #include #include +#ifdef CONFIG_CPU_FREQ + +static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref); +static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq); +static unsigned long glb_lpj_ref; +static unsigned long glb_lpj_ref_freq; + +static int cpufreq_callback(struct notifier_block *nb, + unsigned long val, void *data) +{ + int cpu; + struct cpufreq_freqs *freq = data; + + /* + * Skip lpj numbers adjustment if the CPU-freq transition is safe for + * the loops delay. (Is this possible?) + */ + if (freq->flags & CPUFREQ_CONST_LOOPS) + return NOTIFY_OK; + + /* Save the initial values of the lpjes for future scaling. */ + if (!glb_lpj_ref) { + glb_lpj_ref = boot_cpu_data.udelay_val; + glb_lpj_ref_freq = freq->old; + + for_each_online_cpu(cpu) { + per_cpu(pcp_lpj_ref, cpu) = + cpu_data[cpu].udelay_val; + per_cpu(pcp_lpj_ref_freq, cpu) = freq->old; + } + } + + cpu = freq->cpu; + /* + * Adjust global lpj variable and per-CPU udelay_val number in + * accordance with the new CPU frequency. 
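Aside: the adjustment described in this comment is plain proportional rescaling of the calibrated loops-per-jiffy value; the kernel performs it with the cpufreq_scale() helper, and the stand-alone sketch below (invented helper name, made-up numbers) shows the same arithmetic:

#include <stdio.h>

static unsigned long scale_lpj(unsigned long ref_lpj,
                               unsigned int ref_khz, unsigned int new_khz)
{
        return (unsigned long)(((unsigned long long)ref_lpj * new_khz) / ref_khz);
}

int main(void)
{
        /* 4,000,000 loops per jiffy calibrated at 1.6 GHz ... */
        unsigned long ref_lpj = 4000000UL;

        /* ... rescales to 2,000,000 when the core drops to 800 MHz. */
        printf("%lu\n", scale_lpj(ref_lpj, 1600000, 800000));
        return 0;
}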
+ */ + if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || + (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { + loops_per_jiffy = cpufreq_scale(glb_lpj_ref, + glb_lpj_ref_freq, + freq->new); + + cpu_data[cpu].udelay_val = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu), + per_cpu(pcp_lpj_ref_freq, cpu), freq->new); + } + + return NOTIFY_OK; +} + +static struct notifier_block cpufreq_notifier = { + .notifier_call = cpufreq_callback, +}; + +static int __init register_cpufreq_notifier(void) +{ + return cpufreq_register_notifier(&cpufreq_notifier, + CPUFREQ_TRANSITION_NOTIFIER); +} +core_initcall(register_cpufreq_notifier); + +#endif /* CONFIG_CPU_FREQ */ + /* * forward reference */ diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 583aed9069337c15a99a9f0e34eda9520cfa4d43..4a23d89e251cfe6d5d27affd6fb8d286dc6e75df 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -2135,6 +2135,7 @@ static void configure_status(void) change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, status_set); + back_to_back_c0_hazard(); } unsigned int hwrena; diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 36f2e860ba3eade90e3c1f535c18713d257ebaff..be63fff95b2abb64c8e766807927d823b3bee940 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -50,7 +50,7 @@ SECTIONS /* . = 0xa800000000300000; */ . = 0xffffffff80300000; #endif - . = VMLINUX_LOAD_ADDRESS; + . = LINKER_LOAD_ADDRESS; /* read-only */ _text = .; /* Text and read-only data */ .text : { diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index bbf5c79cce7a30832422c1a290e895074fab1042..8b204cd1f531d1c7b5a03c94321ad725968604c4 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -58,8 +58,12 @@ /* Ensure that addr is below task's addr_limit */ #define __addr_ok(addr) ((unsigned long) addr < get_fs()) -#define access_ok(type, addr, size) \ - __range_ok((unsigned long)addr, (unsigned long)size) +#define access_ok(type, addr, size) \ +({ \ + unsigned long __ao_addr = (unsigned long)(addr); \ + unsigned long __ao_size = (unsigned long)(size); \ + __range_ok(__ao_addr, __ao_size); \ +}) /* * These are the main single-value transfer routines. 
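Aside on the access_ok() rewrite just above: parenthesising the arguments and evaluating them once into temporaries avoids classic macro pitfalls; one such pitfall, shown in the stand-alone sketch below (macro names invented for the demo), is that an unparenthesised cast binds only to the first token of the argument:

#include <stdio.h>

#define CAST_NO_PARENS(addr)   ((unsigned long)addr)
#define CAST_WITH_PARENS(addr) ((unsigned long)(addr))

int main(void)
{
        int buf[4];
        int *p = buf;

        /* "p + 1" advances by sizeof(int); the unparenthesised cast
         * turns the expansion into "(unsigned long)p + 1" instead. */
        printf("no parens:   %lu\n", CAST_NO_PARENS(p + 1) - (unsigned long)p);
        printf("with parens: %lu\n", CAST_WITH_PARENS(p + 1) - (unsigned long)p);
        return 0;
}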
They automatically diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index 1107d34e45bf14d25f57757a567d8e7927d6a325..0fdfa7142f4b3b9d21018093462a9e42ad2ac2e6 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -1102,13 +1102,13 @@ ENTRY(__sys_clone) l.movhi r29,hi(sys_clone) l.ori r29,r29,lo(sys_clone) l.j _fork_save_extra_regs_and_call - l.addi r7,r1,0 + l.nop ENTRY(__sys_fork) l.movhi r29,hi(sys_fork) l.ori r29,r29,lo(sys_fork) l.j _fork_save_extra_regs_and_call - l.addi r3,r1,0 + l.nop ENTRY(sys_rt_sigreturn) l.jal _sys_rt_sigreturn diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index bc54addd589f69daf4fb7aae0c23d47ea5c631a1..614bcc7673f59f59d384da4bf56f35ff0f220126 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -258,6 +258,8 @@ atomic64_set(atomic64_t *v, s64 i) _atomic_spin_unlock_irqrestore(v, flags); } +#define atomic64_set_release(v, i) atomic64_set((v), (i)) + static __inline__ s64 atomic64_read(const atomic64_t *v) { diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index cc700f7dda5442258ed6ccda86d0875530936897..8be075f81ce66df5f7f4f93c07e55c7abc745f75 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -608,7 +608,7 @@ void __init mem_init(void) > BITS_PER_LONG); high_memory = __va((max_pfn << PAGE_SHIFT)); - set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1); + set_max_mapnr(max_low_pfn); free_all_bootmem(); #ifdef CONFIG_PA11 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 277e4ffb928bc5f1bb8cc663ed9f8ac4c31fb84d..679e1e3c1695390699b7ffae7d7a5d964ff0ed62 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -141,12 +141,14 @@ config PPC select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE select ARCH_HAS_SG_CHAIN + select ARCH_HAS_STRICT_KERNEL_RWX if (PPC32 && !HIBERNATION) select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAS_ZONE_DEVICE if PPC_BOOK3S_64 select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO + select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT select ARCH_USE_BUILTIN_BSWAP @@ -178,8 +180,6 @@ config PPC select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK - select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION) - select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select HAVE_CBPF_JIT if !PPC64 select HAVE_CONTEXT_TRACKING if PPC64 select HAVE_DEBUG_KMEMLEAK diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index bcb79a96a6c836bd9b2c81ceb3579ed62e96befd..618ee2c0ed53dc91b9b89f54dee11fda5517f420 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -898,10 +898,25 @@ extern struct page *pgd_page(pgd_t pgd); #define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS) #define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS) -#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) -#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1)) -#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1)) -#define pte_index(address) (((address) >> (PAGE_SHIFT)) & 
(PTRS_PER_PTE - 1)) +static inline unsigned long pgd_index(unsigned long address) +{ + return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1); +} + +static inline unsigned long pud_index(unsigned long address) +{ + return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); +} + +static inline unsigned long pmd_index(unsigned long address) +{ + return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); +} + +static inline unsigned long pte_index(unsigned long address) +{ + return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); +} /* * Find an entry in a page-table-directory. We combine the address region diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 7ed2b1b6643cc9da9c826e473a19e1527f29244f..09134df01bfd1d05f7582a6b58155609358c94fd 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -385,6 +385,14 @@ static int __init feat_enable_dscr(struct dt_cpu_feature *f) { u64 lpcr; + /* + * Linux relies on FSCR[DSCR] being clear, so that we can take the + * facility unavailable interrupt and track the task's usage of DSCR. + * See facility_unavailable_exception(). + * Clear the bit here so that feat_enable() doesn't set it. + */ + f->fscr_bit_nr = -1; + feat_enable(f); lpcr = mfspr(SPRN_LPCR); diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 9dafd7af39b8f517b68ab254e1ec134fffd993e8..cb4d6cd949fc4e8a93e0e2c086e9d4e167cb5765 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -113,11 +113,12 @@ void machine_kexec(struct kimage *image) void __init reserve_crashkernel(void) { - unsigned long long crash_size, crash_base; + unsigned long long crash_size, crash_base, total_mem_sz; int ret; + total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size(); /* use common parsing */ - ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), + ret = parse_crashkernel(boot_command_line, total_mem_sz, &crash_size, &crash_base); if (ret == 0 && crash_size > 0) { crashk_res.start = crash_base; @@ -176,6 +177,7 @@ void __init reserve_crashkernel(void) /* Crash kernel trumps memory limit */ if (memory_limit && memory_limit <= crashk_res.end) { memory_limit = crashk_res.end + 1; + total_mem_sz = memory_limit; printk("Adjusted memory limit for crashkernel, now 0x%llx\n", memory_limit); } @@ -184,7 +186,7 @@ void __init reserve_crashkernel(void) "for crashkernel (System RAM: %ldMB)\n", (unsigned long)(crash_size >> 20), (unsigned long)(crashk_res.start >> 20), - (unsigned long)(memblock_phys_mem_size() >> 20)); + (unsigned long)(total_mem_sz >> 20)); if (!memblock_is_region_memory(crashk_res.start, crash_size) || memblock_reserve(crashk_res.start, crash_size)) { diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index d96b2841509047a307749c6b8c7955fca9899d67..bbe9c57dd1a368929abc250ed9d894b8af3807d8 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -658,6 +658,23 @@ static void __init early_reserve_mem(void) #endif } +#ifdef CONFIG_PPC64 +static void __init save_fscr_to_task(void) +{ + /* + * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we + * have configured via the device tree features or via __init_FSCR(). + * That value will then be propagated to pid 1 (init) and all future + * processes. 
+ */ + if (early_cpu_has_feature(CPU_FTR_ARCH_207S)) + init_task.thread.fscr = mfspr(SPRN_FSCR); +} +#else +static inline void save_fscr_to_task(void) {}; +#endif + + void __init early_init_devtree(void *params) { phys_addr_t limit; @@ -743,6 +760,8 @@ void __init early_init_devtree(void *params) BUG(); } + save_fscr_to_task(); + #if defined(CONFIG_SMP) && defined(CONFIG_PPC64) /* We'll later wait for secondaries to check in; there are * NCPUS-1 non-boot CPUs :-) diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 72238eedc360fa1d24fa382c2d696097739d44c5..2bb798918483dc9029b59e2b0d33fa34d24925b3 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -1413,16 +1413,6 @@ static void h_24x7_event_read(struct perf_event *event) h24x7hw = &get_cpu_var(hv_24x7_hw); h24x7hw->events[i] = event; put_cpu_var(h24x7hw); - /* - * Clear the event count so we can compute the _change_ - * in the 24x7 raw counter value at the end of the txn. - * - * Note that we could alternatively read the 24x7 value - * now and save its value in event->hw.prev_count. But - * that would require issuing a hcall, which would then - * defeat the purpose of using the txn interface. - */ - local64_set(&event->count, 0); } put_cpu_var(hv_24x7_reqb); diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c index 73e6b36bcd5125724a0c913d745e1a4bab9b1133..256943af58aaeeca75c6b4d1a4b61d4b721c6c46 100644 --- a/arch/powerpc/platforms/4xx/pci.c +++ b/arch/powerpc/platforms/4xx/pci.c @@ -1242,7 +1242,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port) if (mbase == NULL) { printk(KERN_ERR "%pOF: Can't map internal config space !", port->node); - goto done; + return; } while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA) @@ -1252,9 +1252,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port) } if (attempt) port->link = 1; -done: iounmap(mbase); - } static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = { diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 5ffcdeb1eb17680cf9a02771a8248ef62fae34e0..9d9fffaedeef239448887c4a0cab19c8d4ecdff5 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -1988,8 +1988,9 @@ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx, static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { - int ret; struct spu_context *ctx = file->private_data; + u32 stat, data; + int ret; if (!access_ok(VERIFY_WRITE, buf, len)) return -EFAULT; @@ -1998,11 +1999,16 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, if (ret) return ret; spin_lock(&ctx->csa.register_lock); - ret = __spufs_mbox_info_read(ctx, buf, len, pos); + stat = ctx->csa.prob.mb_stat_R; + data = ctx->csa.prob.pu_mb_R; spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); - return ret; + /* EOF if there's no entry in the mbox */ + if (!(stat & 0x0000ff)) + return 0; + + return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); } static const struct file_operations spufs_mbox_info_fops = { @@ -2029,6 +2035,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; + u32 stat, data; int ret; if (!access_ok(VERIFY_WRITE, buf, len)) @@ -2038,11 +2045,16 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, if 
(ret) return ret; spin_lock(&ctx->csa.register_lock); - ret = __spufs_ibox_info_read(ctx, buf, len, pos); + stat = ctx->csa.prob.mb_stat_R; + data = ctx->csa.priv2.puint_mb_R; spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); - return ret; + /* EOF if there's no entry in the ibox */ + if (!(stat & 0xff0000)) + return 0; + + return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); } static const struct file_operations spufs_ibox_info_fops = { @@ -2051,6 +2063,11 @@ static const struct file_operations spufs_ibox_info_fops = { .llseek = generic_file_llseek, }; +static size_t spufs_wbox_info_cnt(struct spu_context *ctx) +{ + return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32); +} + static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, char __user *buf, size_t len, loff_t *pos) { @@ -2059,7 +2076,7 @@ static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, u32 wbox_stat; wbox_stat = ctx->csa.prob.mb_stat_R; - cnt = 4 - ((wbox_stat & 0x00ff00) >> 8); + cnt = spufs_wbox_info_cnt(ctx); for (i = 0; i < cnt; i++) { data[i] = ctx->csa.spu_mailbox_data[i]; } @@ -2072,7 +2089,8 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; - int ret; + u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)]; + int ret, count; if (!access_ok(VERIFY_WRITE, buf, len)) return -EFAULT; @@ -2081,11 +2099,13 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, if (ret) return ret; spin_lock(&ctx->csa.register_lock); - ret = __spufs_wbox_info_read(ctx, buf, len, pos); + count = spufs_wbox_info_cnt(ctx); + memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data)); spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); - return ret; + return simple_read_from_buffer(buf, len, pos, &data, + count * sizeof(u32)); } static const struct file_operations spufs_wbox_info_fops = { @@ -2094,27 +2114,33 @@ static const struct file_operations spufs_wbox_info_fops = { .llseek = generic_file_llseek, }; -static ssize_t __spufs_dma_info_read(struct spu_context *ctx, - char __user *buf, size_t len, loff_t *pos) +static void spufs_get_dma_info(struct spu_context *ctx, + struct spu_dma_info *info) { - struct spu_dma_info info; - struct mfc_cq_sr *qp, *spuqp; int i; - info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; - info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; - info.dma_info_status = ctx->csa.spu_chnldata_RW[24]; - info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; - info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; + info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; + info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; + info->dma_info_status = ctx->csa.spu_chnldata_RW[24]; + info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; + info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; for (i = 0; i < 16; i++) { - qp = &info.dma_info_command_data[i]; - spuqp = &ctx->csa.priv2.spuq[i]; + struct mfc_cq_sr *qp = &info->dma_info_command_data[i]; + struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i]; qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; } +} + +static ssize_t __spufs_dma_info_read(struct spu_context *ctx, + char __user *buf, size_t len, loff_t *pos) +{ + struct spu_dma_info info; + + spufs_get_dma_info(ctx, &info); return 
simple_read_from_buffer(buf, len, pos, &info, sizeof info); @@ -2124,6 +2150,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; + struct spu_dma_info info; int ret; if (!access_ok(VERIFY_WRITE, buf, len)) @@ -2133,11 +2160,12 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, if (ret) return ret; spin_lock(&ctx->csa.register_lock); - ret = __spufs_dma_info_read(ctx, buf, len, pos); + spufs_get_dma_info(ctx, &info); spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); - return ret; + return simple_read_from_buffer(buf, len, pos, &info, + sizeof(info)); } static const struct file_operations spufs_dma_info_fops = { @@ -2146,13 +2174,31 @@ static const struct file_operations spufs_dma_info_fops = { .llseek = no_llseek, }; +static void spufs_get_proxydma_info(struct spu_context *ctx, + struct spu_proxydma_info *info) +{ + int i; + + info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW; + info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; + info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; + + for (i = 0; i < 8; i++) { + struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i]; + struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i]; + + qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; + qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; + qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; + qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; + } +} + static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, char __user *buf, size_t len, loff_t *pos) { struct spu_proxydma_info info; - struct mfc_cq_sr *qp, *puqp; int ret = sizeof info; - int i; if (len < ret) return -EINVAL; @@ -2160,18 +2206,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, if (!access_ok(VERIFY_WRITE, buf, len)) return -EFAULT; - info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW; - info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; - info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; - for (i = 0; i < 8; i++) { - qp = &info.proxydma_info_command_data[i]; - puqp = &ctx->csa.priv2.puq[i]; - - qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; - qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; - qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; - qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; - } + spufs_get_proxydma_info(ctx, &info); return simple_read_from_buffer(buf, len, pos, &info, sizeof info); @@ -2181,17 +2216,19 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; + struct spu_proxydma_info info; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; spin_lock(&ctx->csa.register_lock); - ret = __spufs_proxydma_info_read(ctx, buf, len, pos); + spufs_get_proxydma_info(ctx, &info); spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); - return ret; + return simple_read_from_buffer(buf, len, pos, &info, + sizeof(info)); } static const struct file_operations spufs_proxydma_info_fops = { diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index b0f34663b1aecf388a5e6cf6e09bebbf7ae76bc3..19bae78b1f25b1f48eca98e0bcd0453d25a23e01 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ b/arch/powerpc/platforms/ps3/mm.c @@ -212,13 +212,14 @@ void ps3_mm_vas_destroy(void) { int result; - DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id); - if (map.vas_id) { result = lv1_select_virtual_address_space(0); - BUG_ON(result); - 
result = lv1_destruct_virtual_address_space(map.vas_id); - BUG_ON(result); + result += lv1_destruct_virtual_address_space(map.vas_id); + + if (result) { + lv1_panic(0); + } + map.vas_id = 0; } } @@ -316,19 +317,20 @@ static void ps3_mm_region_destroy(struct mem_region *r) int result; if (!r->destroy) { - pr_info("%s:%d: Not destroying high region: %llxh %llxh\n", - __func__, __LINE__, r->base, r->size); return; } - DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base); - if (r->base) { result = lv1_release_memory(r->base); - BUG_ON(result); + + if (result) { + lv1_panic(0); + } + r->size = r->base = r->offset = 0; map.total = map.rm.size; } + ps3_mm_set_repository_highmem(NULL); } diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 99d1152ae22413a5b8139ad9c9f0d9328a96d336..5ec935521204a57d165b92ddf0d11b736e11b098 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -325,10 +325,11 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) /* * Some versions of FWNMI place the buffer inside the 4kB page starting at * 0x7000. Other versions place it inside the rtas buffer. We check both. + * Minimum size of the buffer is 16 bytes. */ #define VALID_FWNMI_BUFFER(A) \ - ((((A) >= 0x7000) && ((A) < 0x7ff0)) || \ - (((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16)))) + ((((A) >= 0x7000) && ((A) <= 0x8000 - 16)) || \ + (((A) >= rtas.base) && ((A) <= (rtas.base + rtas.size - 16)))) /* * Get the error information for errors coming through the diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index b7ae5a027714eabb75002033d431d195b0e13443..f8181c8af32db39a82e7cd4ccfbc772f5c66cc2e 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -932,12 +933,16 @@ EXPORT_SYMBOL_GPL(is_xive_irq); void xive_cleanup_irq_data(struct xive_irq_data *xd) { if (xd->eoi_mmio) { + unmap_kernel_range((unsigned long)xd->eoi_mmio, + 1u << xd->esb_shift); iounmap(xd->eoi_mmio); if (xd->eoi_mmio == xd->trig_mmio) xd->trig_mmio = NULL; xd->eoi_mmio = NULL; } if (xd->trig_mmio) { + unmap_kernel_range((unsigned long)xd->trig_mmio, + 1u << xd->esb_shift); iounmap(xd->trig_mmio); xd->trig_mmio = NULL; } diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 3fdc0bb974d924dd6b29ebdd5decfac176ea28e3..82d76ac71d2ec7cd89ece892c7ab31cab1f7b60c 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -33,12 +33,12 @@ #define KVM_USER_MEM_SLOTS 32 /* - * These seem to be used for allocating ->chip in the routing table, - * which we don't use. 4096 is an out-of-thin-air value. If we need - * to look at ->chip later on, we'll need to revisit this. + * These seem to be used for allocating ->chip in the routing table, which we + * don't use. 1 is as small as we can get to reduce the needed memory. If we + * need to look at ->chip later on, we'll need to revisit this. 
*/ #define KVM_NR_IRQCHIPS 1 -#define KVM_IRQCHIP_NUM_PINS 4096 +#define KVM_IRQCHIP_NUM_PINS 1 #define KVM_HALT_POLL_NS_DEFAULT 80000 /* s390-specific vcpu->requests bit members */ diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index 6bc941be6921773f566efd701a213ef44793a793..166fbd74e316c42d2f769963c7d2856b30703a8a 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -41,7 +41,17 @@ static inline void syscall_rollback(struct task_struct *task, static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { - return IS_ERR_VALUE(regs->gprs[2]) ? regs->gprs[2] : 0; + unsigned long error = regs->gprs[2]; +#ifdef CONFIG_COMPAT + if (test_tsk_thread_flag(task, TIF_31BIT)) { + /* + * Sign-extend the value so (int)-EFOO becomes (long)-EFOO + * and will match correctly in comparisons. + */ + error = (long)(int)error; + } +#endif + return IS_ERR_VALUE(error) ? error : 0; } static inline long syscall_get_return_value(struct task_struct *task, diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 4ba5ad44a21a2ec860e35789c8da3816b9501612..73045142febf062b6857af4001bd40c0cfbf586f 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -317,6 +317,8 @@ static noinline __init void setup_lowcore_early(void) psw_t psw; psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA; + if (IS_ENABLED(CONFIG_KASAN)) + psw.mask |= PSW_MASK_DAT; psw.addr = (unsigned long) s390_base_ext_handler; S390_lowcore.external_new_psw = psw; psw.addr = (unsigned long) s390_base_pgm_handler; diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 0cfd5a83a1daa9d32127454b5fa18609076b094b..151f001a90ff7935130ed0fcbec2c7262efb7b19 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -40,6 +40,7 @@ EXPORT_SYMBOL(_mcount) ENTRY(ftrace_caller) .globl ftrace_regs_caller .set ftrace_regs_caller,ftrace_caller + stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller lgr %r1,%r15 #ifndef CC_USING_HOTPATCH aghi %r0,MCOUNT_RETURN_FIXUP diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 56e0190d6e6518cff6512525431c588e04b9ae3e..42e4cd20fbbed13c0b306d31bbf7fc4d050075ec 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -322,6 +322,25 @@ static inline void __poke_user_per(struct task_struct *child, child->thread.per_user.end = data; } +static void fixup_int_code(struct task_struct *child, addr_t data) +{ + struct pt_regs *regs = task_pt_regs(child); + int ilc = regs->int_code >> 16; + u16 insn; + + if (ilc > 6) + return; + + if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16), + &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn)) + return; + + /* double check that tracee stopped on svc instruction */ + if ((insn >> 8) != 0xa) + return; + + regs->int_code = 0x20000 | (data & 0xffff); +} /* * Write a word to the user area of a process at location addr. This * operation does have an additional problem compared to peek_user. 
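Aside on the syscall_get_error() hunk above: a stand-alone sketch of why the (long)(int) sign extension matters for 31-bit tasks; IS_ERR_VALUE() is re-declared here in simplified form purely for the demo:

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095
/* simplified stand-in for the kernel's IS_ERR_VALUE() */
#define IS_ERR_VALUE(x) ((uint64_t)(x) >= (uint64_t)-MAX_ERRNO)

int main(void)
{
        uint32_t gpr2 = (uint32_t)-1;           /* -EPERM as left in a gpr by a 31-bit task */
        uint64_t raw = gpr2;                    /* zero-extended: 0x00000000ffffffff */
        int64_t fixed = (int64_t)(int32_t)gpr2; /* sign-extended: -1 */

        printf("raw:   %d\n", (int)IS_ERR_VALUE(raw));   /* 0, the error is missed */
        printf("fixed: %d\n", (int)IS_ERR_VALUE(fixed)); /* 1, the error is seen */
        return 0;
}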
@@ -333,7 +352,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) struct user *dummy = NULL; addr_t offset; + if (addr < (addr_t) &dummy->regs.acrs) { + struct pt_regs *regs = task_pt_regs(child); /* * psw and gprs are stored on the stack */ @@ -351,7 +372,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) /* Invalid addressing mode bits */ return -EINVAL; } - *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; + + if (test_pt_regs_flag(regs, PIF_SYSCALL) && + addr == offsetof(struct user, regs.gprs[2])) + fixup_int_code(child, data); + *(addr_t *)((addr_t) &regs->psw + addr) = data; } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { /* @@ -717,6 +742,10 @@ static int __poke_user_compat(struct task_struct *child, regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | (__u64)(tmp & PSW32_ADDR_AMODE); } else { + + if (test_pt_regs_flag(regs, PIF_SYSCALL) && + addr == offsetof(struct compat_user, regs.gprs[2])) + fixup_int_code(child, data); /* gpr 0-15 */ *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; } diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index e19ea9ebe960bd0444a46b3d6fe39c67267f90e7..777a4418693fb21df8c167ef4614b1f3b1c2ef96 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -117,7 +117,7 @@ static inline pte_t __rste_to_pte(unsigned long rste) _PAGE_YOUNG); #ifdef CONFIG_MEM_SOFT_DIRTY pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, - _PAGE_DIRTY); + _PAGE_SOFT_DIRTY); #endif pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, _PAGE_NOEXEC); diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 32eb56e00c11f9558b346a809d66643463203d4e..6e7816360a751eadc9de2657faae2f22a42596c4 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -16,8 +16,11 @@ * sum := addr + size; carry?
--> flag = true; * if (sum >= addr_limit) flag = true; */ -#define __access_ok(addr, size) \ - (__addr_ok((addr) + (size))) +#define __access_ok(addr, size) ({ \ + unsigned long __ao_a = (addr), __ao_b = (size); \ + unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \ + __ao_end >= __ao_a && __addr_ok(__ao_end); }) + #define access_ok(type, addr, size) \ (__chk_user_ptr(addr), \ __access_ok((unsigned long __force)(addr), (size))) diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c index 16b50afe7b52fcf8054415660a7fc0705decae8b..646dd58169ecb077eb257b051afbbc131cc4a77f 100644 --- a/arch/sparc/kernel/ptrace_32.c +++ b/arch/sparc/kernel/ptrace_32.c @@ -46,82 +46,79 @@ enum sparc_regset { REGSET_FP, }; +static int regwindow32_get(struct task_struct *target, + const struct pt_regs *regs, + u32 *uregs) +{ + unsigned long reg_window = regs->u_regs[UREG_I6]; + int size = 16 * sizeof(u32); + + if (target == current) { + if (copy_from_user(uregs, (void __user *)reg_window, size)) + return -EFAULT; + } else { + if (access_process_vm(target, reg_window, uregs, size, + FOLL_FORCE) != size) + return -EFAULT; + } + return 0; +} + +static int regwindow32_set(struct task_struct *target, + const struct pt_regs *regs, + u32 *uregs) +{ + unsigned long reg_window = regs->u_regs[UREG_I6]; + int size = 16 * sizeof(u32); + + if (target == current) { + if (copy_to_user((void __user *)reg_window, uregs, size)) + return -EFAULT; + } else { + if (access_process_vm(target, reg_window, uregs, size, + FOLL_FORCE | FOLL_WRITE) != size) + return -EFAULT; + } + return 0; +} + static int genregs32_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { const struct pt_regs *regs = target->thread.kregs; - unsigned long __user *reg_window; - unsigned long *k = kbuf; - unsigned long __user *u = ubuf; - unsigned long reg; + u32 uregs[16]; + int ret; if (target == current) flush_user_windows(); - pos /= sizeof(reg); - count /= sizeof(reg); - - if (kbuf) { - for (; count > 0 && pos < 16; count--) - *k++ = regs->u_regs[pos++]; - - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; - reg_window -= 16; - for (; count > 0 && pos < 32; count--) { - if (get_user(*k++, &reg_window[pos++])) - return -EFAULT; - } - } else { - for (; count > 0 && pos < 16; count--) { - if (put_user(regs->u_regs[pos++], u++)) - return -EFAULT; - } - - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; - reg_window -= 16; - for (; count > 0 && pos < 32; count--) { - if (get_user(reg, &reg_window[pos++]) || - put_user(reg, u++)) - return -EFAULT; - } - } - while (count > 0) { - switch (pos) { - case 32: /* PSR */ - reg = regs->psr; - break; - case 33: /* PC */ - reg = regs->pc; - break; - case 34: /* NPC */ - reg = regs->npc; - break; - case 35: /* Y */ - reg = regs->y; - break; - case 36: /* WIM */ - case 37: /* TBR */ - reg = 0; - break; - default: - goto finish; - } + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + regs->u_regs, + 0, 16 * sizeof(u32)); + if (ret || !count) + return ret; - if (kbuf) - *k++ = reg; - else if (put_user(reg, u++)) + if (pos < 32 * sizeof(u32)) { + if (regwindow32_get(target, regs, uregs)) return -EFAULT; - pos++; - count--; + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + uregs, + 16 * sizeof(u32), 32 * sizeof(u32)); + if (ret || !count) + return ret; } -finish: - pos *= sizeof(reg); - count *= sizeof(reg); - return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, - 38 * sizeof(reg), -1);
+ uregs[0] = regs->psr; + uregs[1] = regs->pc; + uregs[2] = regs->npc; + uregs[3] = regs->y; + uregs[4] = 0; /* WIM */ + uregs[5] = 0; /* TBR */ + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + uregs, + 32 * sizeof(u32), 38 * sizeof(u32)); } static int genregs32_set(struct task_struct *target, @@ -130,82 +127,58 @@ static int genregs32_set(struct task_struct *target, const void *kbuf, const void __user *ubuf) { struct pt_regs *regs = target->thread.kregs; - unsigned long __user *reg_window; - const unsigned long *k = kbuf; - const unsigned long __user *u = ubuf; - unsigned long reg; + u32 uregs[16]; + u32 psr; + int ret; if (target == current) flush_user_windows(); - pos /= sizeof(reg); - count /= sizeof(reg); - - if (kbuf) { - for (; count > 0 && pos < 16; count--) - regs->u_regs[pos++] = *k++; - - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; - reg_window -= 16; - for (; count > 0 && pos < 32; count--) { - if (put_user(*k++, &reg_window[pos++])) - return -EFAULT; - } - } else { - for (; count > 0 && pos < 16; count--) { - if (get_user(reg, u++)) - return -EFAULT; - regs->u_regs[pos++] = reg; - } - - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; - reg_window -= 16; - for (; count > 0 && pos < 32; count--) { - if (get_user(reg, u++) || - put_user(reg, &reg_window[pos++])) - return -EFAULT; - } - } - while (count > 0) { - unsigned long psr; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + regs->u_regs, + 0, 16 * sizeof(u32)); + if (ret || !count) + return ret; - if (kbuf) - reg = *k++; - else if (get_user(reg, u++)) + if (pos < 32 * sizeof(u32)) { + if (regwindow32_get(target, regs, uregs)) return -EFAULT; - - switch (pos) { - case 32: /* PSR */ - psr = regs->psr; - psr &= ~(PSR_ICC | PSR_SYSCALL); - psr |= (reg & (PSR_ICC | PSR_SYSCALL)); - regs->psr = psr; - break; - case 33: /* PC */ - regs->pc = reg; - break; - case 34: /* NPC */ - regs->npc = reg; - break; - case 35: /* Y */ - regs->y = reg; - break; - case 36: /* WIM */ - case 37: /* TBR */ - break; - default: - goto finish; - } - - pos++; - count--; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + uregs, + 16 * sizeof(u32), 32 * sizeof(u32)); + if (ret) + return ret; + if (regwindow32_set(target, regs, uregs)) + return -EFAULT; + if (!count) + return 0; } -finish: - pos *= sizeof(reg); - count *= sizeof(reg); - + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &psr, + 32 * sizeof(u32), 33 * sizeof(u32)); + if (ret) + return ret; + regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) | + (psr & (PSR_ICC | PSR_SYSCALL)); + if (!count) + return 0; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &regs->pc, + 33 * sizeof(u32), 34 * sizeof(u32)); + if (ret || !count) + return ret; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &regs->npc, + 34 * sizeof(u32), 35 * sizeof(u32)); + if (ret || !count) + return ret; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &regs->y, + 35 * sizeof(u32), 36 * sizeof(u32)); + if (ret || !count) + return ret; return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - 38 * sizeof(reg), -1); + 36 * sizeof(u32), 38 * sizeof(u32)); } static int fpregs32_get(struct task_struct *target, diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index e1d965e90e1697a8205ca505dbf428cb37b30312..0c478c85e380b2f0babba0ecf02269bd9de242d8 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -571,19 +571,13 @@ static int genregs32_get(struct task_struct *target, for (; count > 0 && pos < 32; count--)
{ if (access_process_vm(target, (unsigned long) - &reg_window[pos], + &reg_window[pos++], &reg, sizeof(reg), FOLL_FORCE) != sizeof(reg)) return -EFAULT; - if (access_process_vm(target, - (unsigned long) u, - &reg, sizeof(reg), - FOLL_FORCE | FOLL_WRITE) - != sizeof(reg)) + if (put_user(reg, u++)) return -EFAULT; - pos++; - u++; } } } @@ -683,12 +677,7 @@ static int genregs32_set(struct task_struct *target, } } else { for (; count > 0 && pos < 32; count--) { - if (access_process_vm(target, - (unsigned long) - u, - &reg, sizeof(reg), - FOLL_FORCE) - != sizeof(reg)) + if (get_user(reg, u++)) return -EFAULT; if (access_process_vm(target, (unsigned long) diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 1c060748c813eb926ffc7dd5f3e1a66b84495557..f38ffcc610d201d081c6a5234642fc64fa0a631c 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -87,7 +87,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE SETUP_OBJS = $(addprefix $(obj)/,$(setup-y)) -sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' +sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' quiet_cmd_zoffset = ZOFFSET $@ cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@ diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index 01d628ea34024c1a88192a2a19e12920bceaf196..c6c4b877f3d2ffcb2fd9ac9fac7f9fde255d4d91 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -49,16 +49,17 @@ * Position Independent Executable (PIE) so that linker won't optimize * R_386_GOT32X relocation to its fixed symbol address. Older * linkers generate R_386_32 relocations against locally defined symbols, - * _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less
To generate - * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as + * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as * hidden: */ .hidden _bss .hidden _ebss .hidden _got .hidden _egot + .hidden _end __HEAD ENTRY(startup_32) diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index a25127916e67973faa03b06ed4f869beb86e4a68..7ab1c6bcc66a9cf29e74d71e7cd771b88599b003 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -41,6 +41,7 @@ .hidden _ebss .hidden _got .hidden _egot + .hidden _end __HEAD .code32 diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig index 5a03ff9b45dba5078bb0df09f0f6ce4130cf21c4..a108576ac594e14609ef19ad745d5550aee34ffc 100644 --- a/arch/x86/configs/x86_64_cuttlefish_defconfig +++ b/arch/x86/configs/x86_64_cuttlefish_defconfig @@ -1,3 +1,4 @@ +CONFIG_KERNEL_LZ4=y CONFIG_POSIX_MQUEUE=y # CONFIG_FHANDLE is not set # CONFIG_USELIB is not set @@ -22,7 +23,6 @@ CONFIG_CPUSETS=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_BPF=y CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_SCHED_TUNE=y CONFIG_BLK_DEV_INITRD=y @@ -102,7 +102,9 @@ CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_MULTIPATH=y CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_NET_IPIP=y CONFIG_NET_IPGRE_DEMUX=y +CONFIG_NET_IPGRE=y CONFIG_IP_MROUTE=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y @@ -125,6 +127,7 @@ CONFIG_INET6_ESP=y CONFIG_INET6_IPCOMP=y CONFIG_IPV6_MIP6=y CONFIG_IPV6_VTI=y +CONFIG_IPV6_GRE=y CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_NETLABEL=y CONFIG_NETFILTER=y @@ -257,6 +260,7 @@ CONFIG_DUMMY=y CONFIG_NETCONSOLE=y CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_TUN=y +CONFIG_VETH=y CONFIG_VIRTIO_NET=y # CONFIG_ETHERNET is not set CONFIG_PPP=y @@ -426,6 +430,7 @@ CONFIG_USB_CONFIGFS_UEVENT=y CONFIG_USB_CONFIGFS_F_MIDI=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_TEST=y +# CONFIG_RTC_DRV_CMOS is not set CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_PMEM=y CONFIG_VIRTIO_INPUT=y @@ -504,6 +509,7 @@ CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_PATH=y CONFIG_HARDENED_USERCOPY=y +CONFIG_STATIC_USERMODEHELPER=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 CONFIG_INIT_STACK_ALL=y diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 557c1bdda311b56b8ff9d57ec26d6bcc3eed6da2..1dbc62a96b859df050441781cec5499997761be0 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -98,13 +98,6 @@ For 32-bit we have the following conventions - kernel is built with #define SIZEOF_PTREGS 21*8 .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0 - /* - * Push registers and sanitize registers of values that a - * speculation attack might otherwise want to exploit. The - * lower registers are likely clobbered well before they - * could be put to use in a speculative execution gadget. 
- * Interleave XOR with PUSH for better uop scheduling: - */ .if \save_ret pushq %rsi /* pt_regs->si */ movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ @@ -117,29 +110,40 @@ For 32-bit we have the following conventions - kernel is built with pushq %rcx /* pt_regs->cx */ pushq \rax /* pt_regs->ax */ pushq %r8 /* pt_regs->r8 */ - xorl %r8d, %r8d /* nospec r8 */ pushq %r9 /* pt_regs->r9 */ - xorl %r9d, %r9d /* nospec r9 */ pushq %r10 /* pt_regs->r10 */ - xorl %r10d, %r10d /* nospec r10 */ pushq %r11 /* pt_regs->r11 */ - xorl %r11d, %r11d /* nospec r11*/ pushq %rbx /* pt_regs->rbx */ - xorl %ebx, %ebx /* nospec rbx*/ pushq %rbp /* pt_regs->rbp */ - xorl %ebp, %ebp /* nospec rbp*/ pushq %r12 /* pt_regs->r12 */ - xorl %r12d, %r12d /* nospec r12*/ pushq %r13 /* pt_regs->r13 */ - xorl %r13d, %r13d /* nospec r13*/ pushq %r14 /* pt_regs->r14 */ - xorl %r14d, %r14d /* nospec r14*/ pushq %r15 /* pt_regs->r15 */ - xorl %r15d, %r15d /* nospec r15*/ UNWIND_HINT_REGS + .if \save_ret pushq %rsi /* return address on top of stack */ .endif + + /* + * Sanitize registers of values that a speculation attack might + * otherwise want to exploit. The lower registers are likely clobbered + * well before they could be put to use in a speculative execution + * gadget. + */ + xorl %edx, %edx /* nospec dx */ + xorl %ecx, %ecx /* nospec cx */ + xorl %r8d, %r8d /* nospec r8 */ + xorl %r9d, %r9d /* nospec r9 */ + xorl %r10d, %r10d /* nospec r10 */ + xorl %r11d, %r11d /* nospec r11 */ + xorl %ebx, %ebx /* nospec rbx */ + xorl %ebp, %ebp /* nospec rbp */ + xorl %r12d, %r12d /* nospec r12 */ + xorl %r13d, %r13d /* nospec r13 */ + xorl %r14d, %r14d /* nospec r14 */ + xorl %r15d, %r15d /* nospec r15 */ + .endm .macro POP_REGS pop_rdi=1 skip_r11rcx=0 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 5ec66fafde4e4ea7a352592b0d51ba34e03b89ef..f24974bddfc962b9b8fdaf0254c9cb079acde59f 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -302,7 +302,6 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) */ syscall_return_via_sysret: /* rcx and r11 are already restored (see code above) */ - UNWIND_HINT_EMPTY POP_REGS pop_rdi=0 skip_r11rcx=1 /* @@ -311,6 +310,7 @@ syscall_return_via_sysret: */ movq %rsp, %rdi movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp + UNWIND_HINT_EMPTY pushq RSP-RDI(%rdi) /* RSP */ pushq (%rdi) /* RDI */ @@ -606,6 +606,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) */ movq %rsp, %rdi movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp + UNWIND_HINT_EMPTY /* Copy the IRET frame to the trampoline stack. 
*/ pushq 6*8(%rdi) /* SS */ @@ -1648,7 +1649,7 @@ ENTRY(rewind_stack_do_exit) movq PER_CPU_VAR(cpu_current_top_of_stack), %rax leaq -PTREGS_SIZE(%rax), %rsp - UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE + UNWIND_HINT_REGS call do_exit END(rewind_stack_do_exit) diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 3fa039855b8f70aa2738468e33bef882b0a398cb..9f645ba57dbb263822600aae5d82138316c8f6e3 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -78,7 +78,7 @@ set_bit(long nr, volatile unsigned long *addr) : "iq" ((u8)CONST_MASK(nr)) : "memory"); } else { - asm volatile(LOCK_PREFIX "bts %1,%0" + asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); } } @@ -94,7 +94,7 @@ set_bit(long nr, volatile unsigned long *addr) */ static __always_inline void __set_bit(long nr, volatile unsigned long *addr) { - asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); + asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory"); } /** @@ -115,7 +115,7 @@ clear_bit(long nr, volatile unsigned long *addr) : CONST_MASK_ADDR(nr, addr) : "iq" ((u8)~CONST_MASK(nr))); } else { - asm volatile(LOCK_PREFIX "btr %1,%0" + asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" : BITOP_ADDR(addr) : "Ir" (nr)); } @@ -137,7 +137,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) { - asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); + asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr)); } static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) @@ -182,7 +182,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long * */ static __always_inline void __change_bit(long nr, volatile unsigned long *addr) { - asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); + asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr)); } /** @@ -201,7 +201,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) : CONST_MASK_ADDR(nr, addr) : "iq" ((u8)CONST_MASK(nr))); } else { - asm volatile(LOCK_PREFIX "btc %1,%0" + asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" : BITOP_ADDR(addr) : "Ir" (nr)); } @@ -217,7 +217,8 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) */ static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c); + GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), + *addr, "Ir", nr, "%0", c); } /** @@ -246,7 +247,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * { bool oldbit; - asm("bts %2,%1" + asm(__ASM_SIZE(bts) " %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr)); @@ -263,7 +264,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * */ static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c); + GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), + *addr, "Ir", nr, "%0", c); } /** @@ -286,7 +288,7 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long { bool oldbit; - asm volatile("btr %2,%1" + asm volatile(__ASM_SIZE(btr) " %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr)); @@ -298,7 +300,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon { bool oldbit; - asm volatile("btc %2,%1" 
+ asm volatile(__ASM_SIZE(btc) " %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr) : "memory"); @@ -316,7 +318,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon */ static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c); + GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), + *addr, "Ir", nr, "%0", c); } static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) @@ -329,7 +332,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l { bool oldbit; - asm volatile("bt %2,%1" + asm volatile(__ASM_SIZE(bt) " %2,%1" CC_SET(c) : CC_OUT(c) (oldbit) : "m" (*(unsigned long *)addr), "Ir" (nr)); diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h index baeba05671268cc7d835ecf6decbfc9fc5fc93ea..88446659294374b2a2aaec82b4fd928b0315ff90 100644 --- a/arch/x86/include/asm/cpu_device_id.h +++ b/arch/x86/include/asm/cpu_device_id.h @@ -9,6 +9,33 @@ #include +#define X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins) + +/** + * X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching + * @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY + * The name is expanded to X86_VENDOR_@_vendor + * @_family: The family number or X86_FAMILY_ANY + * @_model: The model number, model constant or X86_MODEL_ANY + * @_steppings: Bitmask for steppings, stepping constant or X86_STEPPING_ANY + * @_feature: A X86_FEATURE bit or X86_FEATURE_ANY + * @_data: Driver specific data or NULL. The internal storage + * format is unsigned long. The supplied value, pointer + * etc. is casted to unsigned long internally. + * + * Backport version to keep the SRBDS pile consistant. No shorter variants + * required for this. + */ +#define X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \ + _steppings, _feature, _data) { \ + .vendor = X86_VENDOR_##_vendor, \ + .family = _family, \ + .model = _model, \ + .steppings = _steppings, \ + .feature = _feature, \ + .driver_data = (unsigned long) _data \ +} + extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match); #endif diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 157cfaf1064c9e874185a0ee154f2e975c5835b3..e08866cd22878c9c9cddd7a2fbd22fd78a401ab9 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -291,6 +291,7 @@ #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */ #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. 
*/ @@ -346,6 +347,7 @@ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */ #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ #define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ @@ -390,5 +392,6 @@ #define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */ #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ +#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h index 00f7cf45e6999b8edb776784effcb6bcda778ddb..8e95aa4b0d172362263c178de77c7b324de39263 100644 --- a/arch/x86/include/asm/dma.h +++ b/arch/x86/include/asm/dma.h @@ -74,7 +74,7 @@ #define MAX_DMA_PFN ((16UL * 1024 * 1024) >> PAGE_SHIFT) /* 4GB broken PCI/AGP hardware bus master zone */ -#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) +#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT)) #ifdef CONFIG_X86_32 /* The maximum address that we can perform a DMA transfer to on this platform */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9529fe69e1d927bb43199d0e8d1a57b4c2617f89..ecb6009a2c8a2de775d51a8b1ef248e9d15eedf5 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1060,7 +1060,7 @@ struct kvm_x86_ops { void (*enable_log_dirty_pt_masked)(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t offset, unsigned long mask); - int (*write_log_dirty)(struct kvm_vcpu *vcpu); + int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa); /* pmu operations of sub-arch */ const struct kvm_pmu_ops *pmu_ops; diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 5761a86b88e028c1d0f5b5b87c9cef519b9a6dfe..9dc445ac7f19d66c2339a9fe1a331ce2bba3445c 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -110,6 +110,10 @@ #define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */ #define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */ +/* SRBDS support */ +#define MSR_IA32_MCU_OPT_CTRL 0x00000123 +#define RNGDS_MITG_DIS BIT(0) + #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 #define MSR_IA32_SYSENTER_EIP 0x00000176 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index b73a16a56e4f212584ef7f7edd11da0d752e2ea3..041d2a04be1d8a894442938917e2ec8a860af55b 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -232,6 +232,7 @@ enum spectre_v2_mitigation { enum spectre_v2_user_mitigation { SPECTRE_V2_USER_NONE, SPECTRE_V2_USER_STRICT, + SPECTRE_V2_USER_STRICT_PREFERRED, SPECTRE_V2_USER_PRCTL, SPECTRE_V2_USER_SECCOMP, }; diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 12aa2bb6bac4b5d40cfc22d569c4004d35c67c6b..6abf3af96fc817de18395fe5ff1ca31c259b9439 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -526,7 +526,7 @@ static inline bool 
x86_this_cpu_variable_test_bit(int nr, { bool oldbit; - asm volatile("bt "__percpu_arg(2)",%1" + asm volatile("btl "__percpu_arg(2)",%1" CC_SET(c) : CC_OUT(c) (oldbit) : "m" (*(unsigned long __percpu *)addr), "Ir" (nr)); diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 98a337e3835d68758297115cc6d9cbba8e5b657a..2f7a4018b6e48e7886f5dcec31e8ce0ac9c02338 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -234,6 +234,7 @@ static inline int pmd_large(pmd_t pte) } #ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_large */ static inline int pmd_trans_huge(pmd_t pmd) { return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 6a87eda9691e4f319f31f0149ff76c0056d11740..56a89519dc144152bfdbd3fd5a6a1faac066744e 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -344,7 +344,7 @@ struct x86_hw_tss { #define INVALID_IO_BITMAP_OFFSET 0x8000 struct entry_stack { - unsigned long words[64]; + char stack[PAGE_SIZE]; }; struct entry_stack_page { diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index 371b3a4af000764bcae483e8f1125765a35e76e8..55b72ea5e01da67ff58c85ba33493c5ca182d153 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -55,8 +55,13 @@ /* * Initialize the stackprotector canary value. * - * NOTE: this must only be called from functions that never return, + * NOTE: this must only be called from functions that never return * and it must always be inlined. + * + * In addition, it should be called from a compilation unit for which + * stack protector is disabled. Alternatively, the caller should not end + * with a function call which gets tail-call optimized as that would + * lead to checking a modified canary value. */ static __always_inline void boot_init_stack_canary(void) { diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index d7f645b7121bfb7ca7eb18a829194c8794d6172d..0c761cc5a794c0816c350285d1a4d8a60f747b0d 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -711,7 +711,17 @@ extern struct movsl_mask { * checking before using them, but you have to surround them with the * user_access_begin/end() pair. */ -#define user_access_begin() __uaccess_begin() +static __must_check inline bool user_access_begin(int type, + const void __user *ptr, + size_t len) +{ + if (unlikely(!access_ok(type, ptr, len))) + return 0; + __uaccess_begin_nospec(); + return 1; +} + +#define user_access_begin(a, b, c) user_access_begin(a, b, c) #define user_access_end() __uaccess_end() #define user_access_save() smap_save() diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 6415b4aead54600c7c845f75d737c6fce8baa17e..ee33f095132232770d4e4cd84724c6ee52aa4d7b 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -353,8 +353,6 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) * According to Intel, MFENCE can do the serialization here. 
*/ asm volatile("mfence" : : : "memory"); - - printk_once(KERN_DEBUG "TSC deadline timer enabled\n"); return; } @@ -553,7 +551,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events); #define DEADLINE_MODEL_MATCH_REV(model, rev) \ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)rev } -static u32 hsx_deadline_rev(void) +static __init u32 hsx_deadline_rev(void) { switch (boot_cpu_data.x86_stepping) { case 0x02: return 0x3a; /* EP */ @@ -563,7 +561,7 @@ static u32 hsx_deadline_rev(void) return ~0U; } -static u32 bdx_deadline_rev(void) +static __init u32 bdx_deadline_rev(void) { switch (boot_cpu_data.x86_stepping) { case 0x02: return 0x00000011; @@ -575,7 +573,7 @@ static u32 bdx_deadline_rev(void) return ~0U; } -static u32 skx_deadline_rev(void) +static __init u32 skx_deadline_rev(void) { switch (boot_cpu_data.x86_stepping) { case 0x03: return 0x01000136; @@ -588,7 +586,7 @@ static u32 skx_deadline_rev(void) return ~0U; } -static const struct x86_cpu_id deadline_match[] = { +static const struct x86_cpu_id deadline_match[] __initconst = { DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev), DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020), DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev), @@ -610,18 +608,19 @@ static const struct x86_cpu_id deadline_match[] = { {}, }; -static void apic_check_deadline_errata(void) +static __init bool apic_validate_deadline_timer(void) { const struct x86_cpu_id *m; u32 rev; - if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) || - boot_cpu_has(X86_FEATURE_HYPERVISOR)) - return; + if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) + return false; + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return true; m = x86_match_cpu(deadline_match); if (!m) - return; + return true; /* * Function pointers will have the MSB set due to address layout, @@ -633,11 +632,12 @@ static void apic_check_deadline_errata(void) rev = (u32)m->driver_data; if (boot_cpu_data.microcode >= rev) - return; + return true; setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; " "please update microcode to version: 0x%x (or later)\n", rev); + return false; } /* @@ -1914,7 +1914,8 @@ void __init init_apic_mappings(void) { unsigned int new_apicid; - apic_check_deadline_errata(); + if (apic_validate_deadline_timer()) + pr_info("TSC deadline timer available\n"); if (x2apic_mode) { boot_cpu_physical_apicid = read_apic_id(); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 2271adbc3c42a86bb80632d877b78d7346d3d39a..b5652233e674590b565f8d18bcee7438b4c654d4 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2233,12 +2233,12 @@ static int mp_irqdomain_create(int ioapic) ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops, (void *)(long)ioapic); - /* Release fw handle if it was allocated above */ - if (!cfg->dev) - irq_domain_free_fwnode(fn); - - if (!ip->irqdomain) + if (!ip->irqdomain) { + /* Release fw handle if it was allocated above */ + if (!cfg->dev) + irq_domain_free_fwnode(fn); return -ENOMEM; + } ip->irqdomain->parent = parent; diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c index f10e7f93b0e2c04e7b5f22adbc7ebb574f710281..8c102d62b8596bd05e3e61f2683e1359ed5e7b88 100644 --- a/arch/x86/kernel/apic/msi.c +++ b/arch/x86/kernel/apic/msi.c @@ -149,10 +149,11 @@ void __init arch_init_msi_domain(struct irq_domain *parent) msi_default_domain = pci_msi_create_irq_domain(fn, 
&pci_msi_domain_info, parent); - irq_domain_free_fwnode(fn); } - if (!msi_default_domain) + if (!msi_default_domain) { + irq_domain_free_fwnode(fn); pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); + } } #ifdef CONFIG_IRQ_REMAP @@ -185,7 +186,8 @@ struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent, if (!fn) return NULL; d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent); - irq_domain_free_fwnode(fn); + if (!d) + irq_domain_free_fwnode(fn); return d; } #endif @@ -248,7 +250,8 @@ static struct irq_domain *dmar_get_irq_domain(void) if (fn) { dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info, x86_vector_domain); - irq_domain_free_fwnode(fn); + if (!dmar_domain) + irq_domain_free_fwnode(fn); } out: mutex_unlock(&dmar_lock); @@ -373,7 +376,10 @@ struct irq_domain *hpet_create_irq_domain(int hpet_id) } d = msi_create_irq_domain(fn, domain_info, parent); - irq_domain_free_fwnode(fn); + if (!d) { + irq_domain_free_fwnode(fn); + kfree(domain_info); + } return d; } diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index b958082c74a77f0903c830949f6b3aae667adea9..36cd34524ac197116dd412b0881d8c26016684e7 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -457,7 +457,6 @@ int __init arch_early_irq_init(void) x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops, NULL); BUG_ON(x86_vector_domain == NULL); - irq_domain_free_fwnode(fn); irq_set_default_host(x86_vector_domain); arch_init_msi_domain(x86_vector_domain); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 7896a34f53b585f85a9972a5bf54f4103e9555b8..245184152892ccdd279e851aa56acc50653f05d5 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -41,6 +41,7 @@ static void __init l1tf_select_mitigation(void); static void __init mds_select_mitigation(void); static void __init mds_print_mitigation(void); static void __init taa_select_mitigation(void); +static void __init srbds_select_mitigation(void); /* The base value of the SPEC_CTRL MSR that always has to be preserved. 
*/ u64 x86_spec_ctrl_base; @@ -60,7 +61,7 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; u64 __ro_after_init x86_amd_ls_cfg_base; u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; -/* Control conditional STIPB in switch_to() */ +/* Control conditional STIBP in switch_to() */ DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); /* Control conditional IBPB in switch_mm() */ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); @@ -108,6 +109,7 @@ void __init check_bugs(void) l1tf_select_mitigation(); mds_select_mitigation(); taa_select_mitigation(); + srbds_select_mitigation(); /* * As MDS and TAA mitigations are inter-related, print MDS @@ -390,6 +392,97 @@ static int __init tsx_async_abort_parse_cmdline(char *str) } early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); +#undef pr_fmt +#define pr_fmt(fmt) "SRBDS: " fmt + +enum srbds_mitigations { + SRBDS_MITIGATION_OFF, + SRBDS_MITIGATION_UCODE_NEEDED, + SRBDS_MITIGATION_FULL, + SRBDS_MITIGATION_TSX_OFF, + SRBDS_MITIGATION_HYPERVISOR, +}; + +static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL; + +static const char * const srbds_strings[] = { + [SRBDS_MITIGATION_OFF] = "Vulnerable", + [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", + [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode", + [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled", + [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", +}; + +static bool srbds_off; + +void update_srbds_msr(void) +{ + u64 mcu_ctrl; + + if (!boot_cpu_has_bug(X86_BUG_SRBDS)) + return; + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return; + + if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) + return; + + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + + switch (srbds_mitigation) { + case SRBDS_MITIGATION_OFF: + case SRBDS_MITIGATION_TSX_OFF: + mcu_ctrl |= RNGDS_MITG_DIS; + break; + case SRBDS_MITIGATION_FULL: + mcu_ctrl &= ~RNGDS_MITG_DIS; + break; + default: + break; + } + + wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); +} + +static void __init srbds_select_mitigation(void) +{ + u64 ia32_cap; + + if (!boot_cpu_has_bug(X86_BUG_SRBDS)) + return; + + /* + * Check to see if this is one of the MDS_NO systems supporting + * TSX that are only exposed to SRBDS when TSX is enabled. 
+ */ + ia32_cap = x86_read_arch_cap_msr(); + if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM)) + srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; + else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; + else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) + srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; + else if (cpu_mitigations_off() || srbds_off) + srbds_mitigation = SRBDS_MITIGATION_OFF; + + update_srbds_msr(); + pr_info("%s\n", srbds_strings[srbds_mitigation]); +} + +static int __init srbds_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!boot_cpu_has_bug(X86_BUG_SRBDS)) + return 0; + + srbds_off = !strcmp(str, "off"); + return 0; +} +early_param("srbds", srbds_parse_cmdline); + #undef pr_fmt #define pr_fmt(fmt) "Spectre V1 : " fmt @@ -488,7 +581,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline); static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE; -static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = +static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = + SPECTRE_V2_USER_NONE; +static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = SPECTRE_V2_USER_NONE; #ifdef CONFIG_RETPOLINE @@ -540,10 +635,11 @@ enum spectre_v2_user_cmd { }; static const char * const spectre_v2_user_strings[] = { - [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", - [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", - [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", - [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", + [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", + [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", + [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", + [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", + [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", }; static const struct { @@ -655,23 +751,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", static_key_enabled(&switch_mm_always_ibpb) ? "always-on" : "conditional"); + + spectre_v2_user_ibpb = mode; } - /* If enhanced IBRS is enabled no STIPB required */ - if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + /* + * If enhanced IBRS is enabled or SMT impossible, STIBP is not + * required. + */ + if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) return; /* - * If SMT is not possible or STIBP is not available clear the STIPB - * mode. + * At this point, an STIBP mode other than "off" has been set. + * If STIBP support is not being forced, check if STIBP always-on + * is preferred. */ - if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP)) + if (mode != SPECTRE_V2_USER_STRICT && + boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) + mode = SPECTRE_V2_USER_STRICT_PREFERRED; + + /* + * If STIBP is not available, clear the STIBP mode. 
+ */ + if (!boot_cpu_has(X86_FEATURE_STIBP)) mode = SPECTRE_V2_USER_NONE; + + spectre_v2_user_stibp = mode; + set_mode: - spectre_v2_user = mode; - /* Only print the STIBP mode when SMT possible */ - if (smt_possible) - pr_info("%s\n", spectre_v2_user_strings[mode]); + pr_info("%s\n", spectre_v2_user_strings[mode]); } static const char * const spectre_v2_strings[] = { @@ -902,10 +1011,11 @@ void arch_smt_update(void) { mutex_lock(&spec_ctrl_mutex); - switch (spectre_v2_user) { + switch (spectre_v2_user_stibp) { case SPECTRE_V2_USER_NONE: break; case SPECTRE_V2_USER_STRICT: + case SPECTRE_V2_USER_STRICT_PREFERRED: update_stibp_strict(); break; case SPECTRE_V2_USER_PRCTL: @@ -1134,13 +1244,19 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) { switch (ctrl) { case PR_SPEC_ENABLE: - if (spectre_v2_user == SPECTRE_V2_USER_NONE) + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return 0; /* * Indirect branch speculation is always disabled in strict - * mode. + * mode. It can neither be enabled if it was force-disabled + * by a previous prctl call. + */ - if (spectre_v2_user == SPECTRE_V2_USER_STRICT) + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED || + task_spec_ib_force_disable(task)) return -EPERM; task_clear_spec_ib_disable(task); task_update_spec_tif(task); @@ -1151,9 +1267,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) * Indirect branch speculation is always allowed when * mitigation is force disabled. */ - if (spectre_v2_user == SPECTRE_V2_USER_NONE) + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return -EPERM; - if (spectre_v2_user == SPECTRE_V2_USER_STRICT) + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) return 0; task_set_spec_ib_disable(task); if (ctrl == PR_SPEC_FORCE_DISABLE) @@ -1184,7 +1303,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task) { if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); - if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP) + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || + spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); } #endif @@ -1213,21 +1333,24 @@ static int ib_prctl_get(struct task_struct *task) if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) return PR_SPEC_NOT_AFFECTED; - switch (spectre_v2_user) { - case SPECTRE_V2_USER_NONE: + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return PR_SPEC_ENABLE; - case SPECTRE_V2_USER_PRCTL: - case SPECTRE_V2_USER_SECCOMP: + else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) + return PR_SPEC_DISABLE; + else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || + spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || + spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || + spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) { if (task_spec_ib_force_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; if (task_spec_ib_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_DISABLE; return PR_SPEC_PRCTL | PR_SPEC_ENABLE; - case SPECTRE_V2_USER_STRICT: - return 
PR_SPEC_DISABLE; - default: + } else return PR_SPEC_NOT_AFFECTED; - } } int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) @@ -1466,11 +1589,13 @@ static char *stibp_state(void) if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) return ""; - switch (spectre_v2_user) { + switch (spectre_v2_user_stibp) { case SPECTRE_V2_USER_NONE: return ", STIBP: disabled"; case SPECTRE_V2_USER_STRICT: return ", STIBP: forced"; + case SPECTRE_V2_USER_STRICT_PREFERRED: + return ", STIBP: always-on"; case SPECTRE_V2_USER_PRCTL: case SPECTRE_V2_USER_SECCOMP: if (static_key_enabled(&switch_to_cond_stibp)) @@ -1491,6 +1616,11 @@ static char *ibpb_state(void) return ""; } +static ssize_t srbds_show_state(char *buf) +{ + return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); +} + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -1532,6 +1662,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_ITLB_MULTIHIT: return itlb_multihit_show_state(buf); + case X86_BUG_SRBDS: + return srbds_show_state(buf); + default: break; } @@ -1578,4 +1711,9 @@ ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr { return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); } + +ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); +} #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 7b4141889919083c9d4800c7d08b97708339f2d1..64066a2497e42bad7612557d105991a9249d48f8 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -854,6 +854,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c) else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) c->x86_phys_bits = 36; #endif + c->x86_cache_bits = c->x86_phys_bits; if (c->extended_cpuid_level >= 0x8000000a) c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); @@ -894,7 +895,6 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) } } #endif - c->x86_cache_bits = c->x86_phys_bits; } #define NO_SPECULATION BIT(0) @@ -964,9 +964,30 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { {} }; -static bool __init cpu_matches(unsigned long which) +#define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \ + X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \ + INTEL_FAM6_##model, steppings, \ + X86_FEATURE_ANY, issues) + +#define SRBDS BIT(0) + +static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_CORE, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_ULT, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_GT3E, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(BROADWELL_GT3E, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(BROADWELL_CORE, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x0, 0xC), SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPINGS(0x0, 0xD), SRBDS), + {} +}; + +static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which) { - const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist); + const struct x86_cpu_id *m = x86_match_cpu(table); return m && !!(m->driver_data & which); } 
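The stepping-aware matching that the SRBDS blacklist above relies on is plain bitmask arithmetic: X86_STEPPINGS(min, max) expands to GENMASK(max, min), and x86_match_cpu() (extended later in this diff) tests BIT(c->x86_stepping) against that mask, so the KABYLAKE_MOBILE entry with X86_STEPPINGS(0x0, 0xC) covers steppings 0 through 0xC and nothing newer. Below is a minimal userspace sketch of that check; the GENMASK()/BIT() definitions are local stand-ins for the kernel macros, not the kernel implementations.

#include <stdio.h>

/* 32-bit stand-ins for GENMASK()/BIT(), enough for a stepping-mask demo. */
#define GENMASK(h, l)			((~0u >> (31 - (h))) & (~0u << (l)))
#define BIT(n)				(1u << (n))
#define X86_STEPPINGS(mins, maxs)	GENMASK(maxs, mins)

int main(void)
{
	unsigned int steppings = X86_STEPPINGS(0x0, 0xC);	/* Kabylake mobile row */

	printf("mask = %#x\n", steppings);			    /* 0x1fff */
	printf("stepping 0xC matches: %d\n", !!(BIT(0xC) & steppings)); /* 1 */
	printf("stepping 0xD matches: %d\n", !!(BIT(0xD) & steppings)); /* 0 */
	return 0;
}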
@@ -986,29 +1007,32 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) u64 ia32_cap = x86_read_arch_cap_msr(); /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */ - if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO)) + if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) && + !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO)) setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT); - if (cpu_matches(NO_SPECULATION)) + if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION)) return; setup_force_cpu_bug(X86_BUG_SPECTRE_V1); setup_force_cpu_bug(X86_BUG_SPECTRE_V2); - if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) && + if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) && + !(ia32_cap & ARCH_CAP_SSB_NO) && !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); if (ia32_cap & ARCH_CAP_IBRS_ALL) setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); - if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) { + if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) && + !(ia32_cap & ARCH_CAP_MDS_NO)) { setup_force_cpu_bug(X86_BUG_MDS); - if (cpu_matches(MSBDS_ONLY)) + if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY)) setup_force_cpu_bug(X86_BUG_MSBDS_ONLY); } - if (!cpu_matches(NO_SWAPGS)) + if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS)) setup_force_cpu_bug(X86_BUG_SWAPGS); /* @@ -1026,7 +1050,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) (ia32_cap & ARCH_CAP_TSX_CTRL_MSR))) setup_force_cpu_bug(X86_BUG_TAA); - if (cpu_matches(NO_MELTDOWN)) + /* + * SRBDS affects CPUs which support RDRAND or RDSEED and are listed + * in the vulnerability blacklist. + */ + if ((cpu_has(c, X86_FEATURE_RDRAND) || + cpu_has(c, X86_FEATURE_RDSEED)) && + cpu_matches(cpu_vuln_blacklist, SRBDS)) + setup_force_cpu_bug(X86_BUG_SRBDS); + + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; /* Rogue Data Cache Load? No! 
*/ @@ -1035,7 +1068,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); - if (cpu_matches(NO_L1TF)) + if (cpu_matches(cpu_vuln_whitelist, NO_L1TF)) return; setup_force_cpu_bug(X86_BUG_L1TF); @@ -1451,6 +1484,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) mtrr_ap_init(); validate_apic_and_package_id(c); x86_spec_ctrl_setup_ap(); + update_srbds_msr(); } static __init int setup_noclflush(char *arg) diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index db10a63687d346e993c6b6c26efca656f97f72a9..432058e5e44bea2ed97a5490f783f21f0cc5a465 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -69,6 +69,7 @@ extern int detect_ht_early(struct cpuinfo_x86 *c); unsigned int aperfmperf_get_khz(int cpu); extern void x86_spec_ctrl_setup_ap(void); +extern void update_srbds_msr(void); extern u64 x86_read_arch_cap_msr(void); diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c index 3fed38812eea340ff1ca2adb4b47534de1e73a59..751e590574660eaec484a8d6eedc912ebcd18e17 100644 --- a/arch/x86/kernel/cpu/match.c +++ b/arch/x86/kernel/cpu/match.c @@ -34,13 +34,18 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match) const struct x86_cpu_id *m; struct cpuinfo_x86 *c = &boot_cpu_data; - for (m = match; m->vendor | m->family | m->model | m->feature; m++) { + for (m = match; + m->vendor | m->family | m->model | m->steppings | m->feature; + m++) { if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor) continue; if (m->family != X86_FAMILY_ANY && c->x86 != m->family) continue; if (m->model != X86_MODEL_ANY && c->x86_model != m->model) continue; + if (m->steppings != X86_STEPPING_ANY && + !(BIT(c->x86_stepping) & m->steppings)) + continue; if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature)) continue; return m; diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 87a57b7642d3673420b272fec0f442b9baf76414..4b900035f2202320e2c26891d130d16eb6eb5414 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -964,18 +964,31 @@ static inline bool xfeatures_mxcsr_quirk(u64 xfeatures) return true; } -/* - * This is similar to user_regset_copyout(), but will not add offset to - * the source data pointer or increment pos, count, kbuf, and ubuf. 
- */ -static inline void -__copy_xstate_to_kernel(void *kbuf, const void *data, - unsigned int offset, unsigned int size, unsigned int size_total) +static void fill_gap(unsigned to, void **kbuf, unsigned *pos, unsigned *count) { - if (offset < size_total) { - unsigned int copy = min(size, size_total - offset); + if (*pos < to) { + unsigned size = to - *pos; + + if (size > *count) + size = *count; + memcpy(*kbuf, (void *)&init_fpstate.xsave + *pos, size); + *kbuf += size; + *pos += size; + *count -= size; + } +} - memcpy(kbuf + offset, data, copy); +static void copy_part(unsigned offset, unsigned size, void *from, + void **kbuf, unsigned *pos, unsigned *count) +{ + fill_gap(offset, kbuf, pos, count); + if (size > *count) + size = *count; + if (size) { + memcpy(*kbuf, from, size); + *kbuf += size; + *pos += size; + *count -= size; } } @@ -988,8 +1001,9 @@ __copy_xstate_to_kernel(void *kbuf, const void *data, */ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total) { - unsigned int offset, size; struct xstate_header header; + const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr); + unsigned count = size_total; int i; /* @@ -1005,46 +1019,42 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of header.xfeatures = xsave->header.xfeatures; header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR; + if (header.xfeatures & XFEATURE_MASK_FP) + copy_part(0, off_mxcsr, + &xsave->i387, &kbuf, &offset_start, &count); + if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM)) + copy_part(off_mxcsr, MXCSR_AND_FLAGS_SIZE, + &xsave->i387.mxcsr, &kbuf, &offset_start, &count); + if (header.xfeatures & XFEATURE_MASK_FP) + copy_part(offsetof(struct fxregs_state, st_space), 128, + &xsave->i387.st_space, &kbuf, &offset_start, &count); + if (header.xfeatures & XFEATURE_MASK_SSE) + copy_part(xstate_offsets[XFEATURE_SSE], 256, + &xsave->i387.xmm_space, &kbuf, &offset_start, &count); + /* + * Fill xsave->i387.sw_reserved value for ptrace frame: + */ + copy_part(offsetof(struct fxregs_state, sw_reserved), 48, + xstate_fx_sw_bytes, &kbuf, &offset_start, &count); /* * Copy xregs_state->header: */ - offset = offsetof(struct xregs_state, header); - size = sizeof(header); - - __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total); + copy_part(offsetof(struct xregs_state, header), sizeof(header), + &header, &kbuf, &offset_start, &count); - for (i = 0; i < XFEATURE_MAX; i++) { + for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) { /* * Copy only in-use xstates: */ if ((header.xfeatures >> i) & 1) { void *src = __raw_xsave_addr(xsave, 1 << i); - offset = xstate_offsets[i]; - size = xstate_sizes[i]; - - /* The next component has to fit fully into the output buffer: */ - if (offset + size > size_total) - break; - - __copy_xstate_to_kernel(kbuf, src, offset, size, size_total); + copy_part(xstate_offsets[i], xstate_sizes[i], + src, &kbuf, &offset_start, &count); } } - - if (xfeatures_mxcsr_quirk(header.xfeatures)) { - offset = offsetof(struct fxregs_state, mxcsr); - size = MXCSR_AND_FLAGS_SIZE; - __copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total); - } - - /* - * Fill xsave->i387.sw_reserved value for ptrace frame: - */ - offset = offsetof(struct fxregs_state, sw_reserved); - size = sizeof(xstate_fx_sw_bytes); - - __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total); + fill_gap(size_total, &kbuf, &offset_start, &count); return 0; } diff --git 
a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 9d7bb8de2917e6aa25a91cf4e767ed1120f5ee69..02665ffef0506ee39abb6d5c6af7d7c513e55c78 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -744,16 +744,11 @@ asm( NOKPROBE_SYMBOL(kretprobe_trampoline); STACK_FRAME_NON_STANDARD(kretprobe_trampoline); -static struct kprobe kretprobe_kprobe = { - .addr = (void *)kretprobe_trampoline, -}; - /* * Called from kretprobe_trampoline */ __visible __used void *trampoline_handler(struct pt_regs *regs) { - struct kprobe_ctlblk *kcb; struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; struct hlist_node *tmp; @@ -763,16 +758,12 @@ __visible __used void *trampoline_handler(struct pt_regs *regs) void *frame_pointer; bool skipped = false; - preempt_disable(); - /* * Set a dummy kprobe for avoiding kretprobe recursion. * Since kretprobe never run in kprobe handler, kprobe must not * be running at this point. */ - kcb = get_kprobe_ctlblk(); - __this_cpu_write(current_kprobe, &kretprobe_kprobe); - kcb->kprobe_status = KPROBE_HIT_ACTIVE; + kprobe_busy_begin(); INIT_HLIST_HEAD(&empty_rp); kretprobe_hash_lock(current, &head, &flags); @@ -851,7 +842,7 @@ __visible __used void *trampoline_handler(struct pt_regs *regs) __this_cpu_write(current_kprobe, &ri->rp->kp); ri->ret_addr = correct_ret_addr; ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, &kretprobe_kprobe); + __this_cpu_write(current_kprobe, &kprobe_busy); } recycle_rp_inst(ri, &empty_rp); @@ -867,8 +858,7 @@ __visible __used void *trampoline_handler(struct pt_regs *regs) kretprobe_hash_unlock(current, &flags); - __this_cpu_write(current_kprobe, NULL); - preempt_enable(); + kprobe_busy_end(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index d2ef967bfafb688f377ad77d8ca19d77110db8e9..a07b09f68e7ee24f935c9fffc4da67752bcd36ca 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -414,28 +414,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, u64 msr = x86_spec_ctrl_base; bool updmsr = false; - /* - * If TIF_SSBD is different, select the proper mitigation - * method. Note that if SSBD mitigation is disabled or permanentely - * enabled this branch can't be taken because nothing can set - * TIF_SSBD. - */ - if (tif_diff & _TIF_SSBD) { - if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { + /* Handle change of TIF_SSBD depending on the mitigation method. */ + if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { + if (tif_diff & _TIF_SSBD) amd_set_ssb_virt_state(tifn); - } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { + } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { + if (tif_diff & _TIF_SSBD) amd_set_core_ssb_state(tifn); - } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || - static_cpu_has(X86_FEATURE_AMD_SSBD)) { - msr |= ssbd_tif_to_spec_ctrl(tifn); - updmsr = true; - } + } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + static_cpu_has(X86_FEATURE_AMD_SSBD)) { + updmsr |= !!(tif_diff & _TIF_SSBD); + msr |= ssbd_tif_to_spec_ctrl(tifn); } - /* - * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled, - * otherwise avoid the MSR write. - */ + /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. 
*/ if (IS_ENABLED(CONFIG_SMP) && static_branch_unlikely(&switch_to_cond_stibp)) { updmsr |= !!(tif_diff & _TIF_SPEC_IB); diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h index 898e97cf6629d5d8569404a004eb08b31bc3ef96..320ab978fb1f3149b314beb8003c5b224490e7ca 100644 --- a/arch/x86/kernel/process.h +++ b/arch/x86/kernel/process.h @@ -19,7 +19,7 @@ static inline void switch_to_extra(struct task_struct *prev, if (IS_ENABLED(CONFIG_SMP)) { /* * Avoid __switch_to_xtra() invocation when conditional - * STIPB is disabled and the only different bit is + * STIBP is disabled and the only different bit is * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not * in the TIF_WORK_CTXSW masks. */ diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index c663d5fcff2eec180647c2c81f72018071a4506c..b7663a1f89ee5e9fedd00b7c3156ec709ca3a035 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"), }, }, + { /* Handle problems with rebooting on Apple MacBook6,1 */ + .callback = set_pci_reboot, + .ident = "Apple MacBook6,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"), + }, + }, { /* Handle problems with rebooting on Apple MacBookPro5 */ .callback = set_pci_reboot, .ident = "Apple MacBookPro5", diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 30447d210f37e272a08d7af707544ed2622ebf49..66f2a950935a51cbff1d834aaec575b00e832d20 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -270,6 +270,14 @@ static void notrace start_secondary(void *unused) wmb(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); + + /* + * Prevent tail call to cpu_startup_entry() because the stack protector + * guard has been changed a couple of function calls up, in + * boot_init_stack_canary() and must not be checked before tail calling + * another function. 
+ */ + prevent_tail_call_optimization(); } /** diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index ab0176ae985b8b52fc39940ae6dda5556f7945ce..12f90f17f4f6ac4ba2e835c01575dd7c1e118382 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -24,10 +24,6 @@ #include #include -#ifdef CONFIG_X86_64 -__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES; -#endif - unsigned long profile_pc(struct pt_regs *regs) { unsigned long pc = instruction_pointer(regs); diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 3bbb399f7ead3ed5ff670ea2c1100721dbd8728e..a9a55e76a43f9e36ebdfeb05717f7a34e08664f3 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -90,9 +90,6 @@ static struct orc_entry null_orc_entry = { static struct orc_entry *orc_find(unsigned long ip) { - if (!orc_init) - return NULL; - if (ip == 0) return &null_orc_entry; @@ -258,12 +255,19 @@ EXPORT_SYMBOL_GPL(unwind_get_return_address); unsigned long *unwind_get_return_address_ptr(struct unwind_state *state) { + struct task_struct *task = state->task; + if (unwind_done(state)) return NULL; if (state->regs) return &state->regs->ip; + if (task != current && state->sp == task->thread.sp) { + struct inactive_task_frame *frame = (void *)task->thread.sp; + return &frame->ret_addr; + } + if (state->sp) return (unsigned long *)state->sp - 1; @@ -460,7 +464,7 @@ bool unwind_next_frame(struct unwind_state *state) default: orc_warn("unknown .orc_unwind entry type %d for ip %pB\n", orc->type, (void *)orig_ip); - break; + goto done; } /* Find BP: */ @@ -511,17 +515,20 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, memset(state, 0, sizeof(*state)); state->task = task; + if (!orc_init) + goto err; + /* * Refuse to unwind the stack of a task while it's executing on another * CPU. This check is racy, but that's ok: the unwinder has other * checks to prevent it from going off the rails. */ if (task_on_another_cpu(task)) - goto done; + goto err; if (regs) { if (user_mode(regs)) - goto done; + goto the_end; state->ip = regs->ip; state->sp = kernel_stack_pointer(regs); @@ -554,6 +561,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, * generate some kind of backtrace if this happens. 
*/ void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp); + state->error = true; if (get_stack_info(next_page, state->task, &state->stack_info, &state->stack_mask)) return; @@ -574,13 +582,14 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, /* Otherwise, skip ahead to the user-specified starting frame: */ while (!unwind_done(state) && (!on_stack(&state->stack_info, first_frame, sizeof(long)) || - state->sp <= (unsigned long)first_frame)) + state->sp < (unsigned long)first_frame)) unwind_next_frame(state); return; -done: +err: + state->error = true; +the_end: state->stack_info.type = STACK_TYPE_UNKNOWN; - return; } EXPORT_SYMBOL_GPL(__unwind_start); diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 2e994fff9164b6fc9d2dbc5cb93ca1d1fe0bf6ea..5d34ea68298b4033208094a925903a3f56f4fdfd 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -36,13 +36,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT) #ifdef CONFIG_X86_32 OUTPUT_ARCH(i386) ENTRY(phys_startup_32) -jiffies = jiffies_64; #else OUTPUT_ARCH(i386:x86-64) ENTRY(phys_startup_64) -jiffies_64 = jiffies; #endif +jiffies = jiffies_64; + #if defined(CONFIG_X86_64) /* * On 64-bit, align RODATA to 2MB so we retain large page mappings for diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index f500293dad8d6122704958be16bf343fe154ef15..ce4a9f1f845e5ccf54f55bcba190e0c108d536a1 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -5,7 +5,7 @@ #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS #define KVM_POSSIBLE_CR4_GUEST_BITS \ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ - | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE) + | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD) static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, enum kvm_reg reg) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index e5af08b581320ca617909d3748f9896b050f130e..1cceee0ed580ded5f1aece42bd8445ef5a6ef2b3 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -263,6 +263,11 @@ static const u64 shadow_nonpresent_or_rsvd_mask_len = 5; */ static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask; +/* + * The number of non-reserved physical address bits irrespective of features + * that repurpose legal bits, e.g. MKTME. 
+ */ +static u8 __read_mostly shadow_phys_bits; static void mmu_spte_set(u64 *sptep, u64 spte); static void mmu_free_roots(struct kvm_vcpu *vcpu); @@ -275,11 +280,18 @@ static bool is_executable_pte(u64 spte); void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value) { BUG_ON((mmio_mask & mmio_value) != mmio_value); + WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len)); + WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask); shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK; shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK; } EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask); +static bool is_mmio_spte(u64 spte) +{ + return (spte & shadow_mmio_mask) == shadow_mmio_value; +} + static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) { return sp->role.ad_disabled; @@ -287,7 +299,7 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) static inline bool spte_ad_enabled(u64 spte) { - MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value); + MMU_WARN_ON(is_mmio_spte(spte)); return !(spte & shadow_acc_track_value); } @@ -298,13 +310,13 @@ static bool is_nx_huge_page_enabled(void) static inline u64 spte_shadow_accessed_mask(u64 spte) { - MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value); + MMU_WARN_ON(is_mmio_spte(spte)); return spte_ad_enabled(spte) ? shadow_accessed_mask : 0; } static inline u64 spte_shadow_dirty_mask(u64 spte) { - MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value); + MMU_WARN_ON(is_mmio_spte(spte)); return spte_ad_enabled(spte) ? shadow_dirty_mask : 0; } @@ -374,11 +386,6 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, mmu_spte_set(sptep, mask); } -static bool is_mmio_spte(u64 spte) -{ - return (spte & shadow_mmio_mask) == shadow_mmio_value; -} - static gfn_t get_mmio_spte_gfn(u64 spte) { u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask; @@ -443,6 +450,21 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, } EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); +static u8 kvm_get_shadow_phys_bits(void) +{ + /* + * boot_cpu_data.x86_phys_bits is reduced when MKTME is detected + * in CPU detection code, but MKTME treats those reduced bits as + * 'keyID' thus they are not reserved bits. Therefore for MKTME + * we should still return physical address bits reported by CPUID. + */ + if (!boot_cpu_has(X86_FEATURE_TME) || + WARN_ON_ONCE(boot_cpu_data.extended_cpuid_level < 0x80000008)) + return boot_cpu_data.x86_phys_bits; + + return cpuid_eax(0x80000008) & 0xff; +} + static void kvm_mmu_reset_all_pte_masks(void) { u8 low_phys_bits; @@ -456,20 +478,29 @@ static void kvm_mmu_reset_all_pte_masks(void) shadow_present_mask = 0; shadow_acc_track_mask = 0; + shadow_phys_bits = kvm_get_shadow_phys_bits(); + /* * If the CPU has 46 or less physical address bits, then set an * appropriate mask to guard against L1TF attacks. Otherwise, it is * assumed that the CPU is not vulnerable to L1TF. + * + * Some Intel CPUs address the L1 cache using more PA bits than are + * reported by CPUID. Use the PA width of the L1 cache when possible + * to achieve more effective mitigation, e.g. if system RAM overlaps + * the most significant bits of legal physical address space. 
*/ + shadow_nonpresent_or_rsvd_mask = 0; low_phys_bits = boot_cpu_data.x86_phys_bits; - if (boot_cpu_data.x86_phys_bits < - 52 - shadow_nonpresent_or_rsvd_mask_len) { + if (boot_cpu_has_bug(X86_BUG_L1TF) && + !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >= + 52 - shadow_nonpresent_or_rsvd_mask_len)) { + low_phys_bits = boot_cpu_data.x86_cache_bits + - shadow_nonpresent_or_rsvd_mask_len; shadow_nonpresent_or_rsvd_mask = - rsvd_bits(boot_cpu_data.x86_phys_bits - - shadow_nonpresent_or_rsvd_mask_len, - boot_cpu_data.x86_phys_bits - 1); - low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len; + rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1); } + shadow_nonpresent_or_rsvd_lower_gfn_mask = GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT); } @@ -1682,10 +1713,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, * Emulate arch specific page modification logging for the * nested hypervisor */ -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu) +int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa) { if (kvm_x86_ops->write_log_dirty) - return kvm_x86_ops->write_log_dirty(vcpu); + return kvm_x86_ops->write_log_dirty(vcpu, l2_gpa); return 0; } @@ -4213,7 +4244,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, nonleaf_bit8_rsvd | rsvd_bits(7, 7) | rsvd_bits(maxphyaddr, 51); rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | - nonleaf_bit8_rsvd | gbpages_bit_rsvd | + gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51); rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 51); @@ -4305,7 +4336,7 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) */ shadow_zero_check = &context->shadow_zero_check; __reset_rsvds_bits_mask(vcpu, shadow_zero_check, - boot_cpu_data.x86_phys_bits, + shadow_phys_bits, context->shadow_root_level, uses_nx, guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), is_pse(vcpu), true); @@ -4342,13 +4373,13 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, if (boot_cpu_is_amd()) __reset_rsvds_bits_mask(vcpu, shadow_zero_check, - boot_cpu_data.x86_phys_bits, + shadow_phys_bits, context->shadow_root_level, false, boot_cpu_has(X86_FEATURE_GBPAGES), true, true); else __reset_rsvds_bits_mask_ept(shadow_zero_check, - boot_cpu_data.x86_phys_bits, + shadow_phys_bits, false); if (!shadow_me_mask) @@ -4369,7 +4400,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context, bool execonly) { __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, - boot_cpu_data.x86_phys_bits, execonly); + shadow_phys_bits, execonly); } #define BYTE_MASK(access) \ @@ -5666,6 +5697,25 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) return 0; } +static void kvm_set_mmio_spte_mask(void) +{ + u64 mask; + + /* + * Set a reserved PA bit in MMIO SPTEs to generate page faults with + * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT + * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports + * 52-bit physical addresses then there are no reserved PA bits in the + * PTEs and so the reserved PA approach must be disabled. 
+ */ + if (shadow_phys_bits < 52) + mask = BIT_ULL(51) | PT_PRESENT_MASK; + else + mask = 0; + + kvm_mmu_set_mmio_spte_mask(mask, mask); +} + int kvm_mmu_module_init(void) { if (nx_huge_pages == -1) @@ -5673,6 +5723,8 @@ int kvm_mmu_module_init(void) kvm_mmu_reset_all_pte_masks(); + kvm_set_mmio_spte_mask(); + pte_list_desc_cache = kmem_cache_create("pte_list_desc", sizeof(struct pte_list_desc), 0, SLAB_ACCOUNT, NULL); diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 068feab64acf178ac48a8d36cafb94d572dd071a..816a626b6250877f65af4f1f087fcb37a6ef844d 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -194,7 +194,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn); -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu); +int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa); int kvm_mmu_post_init_vm(struct kvm *kvm); void kvm_mmu_pre_destroy_vm(struct kvm *kvm); diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 8cf7a09bdd736fa8f6e4972fd62ae3340bc6c27f..7260a165488d222f59ecce2c1ab28851d8fccb94 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -202,7 +202,7 @@ static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte) static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, struct guest_walker *walker, - int write_fault) + gpa_t addr, int write_fault) { unsigned level, index; pt_element_t pte, orig_pte; @@ -227,7 +227,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, !(pte & PT_GUEST_DIRTY_MASK)) { trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); #if PTTYPE == PTTYPE_EPT - if (kvm_arch_write_log_dirty(vcpu)) + if (kvm_arch_write_log_dirty(vcpu, addr)) return -EINVAL; #endif pte |= PT_GUEST_DIRTY_MASK; @@ -424,7 +424,8 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT); if (unlikely(!accessed_dirty)) { - ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault); + ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, + addr, write_fault); if (unlikely(ret < 0)) goto error; else if (ret) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d636213864185958833505112e8157f79b144c27..78826d123fb86b83fcab8540c00b8f5fbb25924e 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2757,8 +2757,8 @@ static int nested_svm_exit_special(struct vcpu_svm *svm) return NESTED_EXIT_HOST; break; case SVM_EXIT_EXCP_BASE + PF_VECTOR: - /* When we're shadowing, trap PFs, but not async PF */ - if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0) + /* Trap async PF even if not shadowing */ + if (!npt_enabled || svm->vcpu.arch.apf.host_apf_reason) return NESTED_EXIT_HOST; break; default: @@ -2847,7 +2847,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr dst->iopm_base_pa = from->iopm_base_pa; dst->msrpm_base_pa = from->msrpm_base_pa; dst->tsc_offset = from->tsc_offset; - dst->asid = from->asid; + /* asid not copied, it is handled manually for svm->vmcb. 
*/ dst->tlb_ctl = from->tlb_ctl; dst->int_ctl = from->int_ctl; dst->int_vector = from->int_vector; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index c139dedec12bc028ec34adbab6097cd79c51b37f..6876231778a9681cb2bb5f44988077889bf7a06b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5592,6 +5592,8 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) { + BUILD_BUG_ON(KVM_CR4_GUEST_OWNED_BITS & ~KVM_POSSIBLE_CR4_GUEST_BITS); + vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; if (enable_ept) vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; @@ -8711,7 +8713,7 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) vmcs_read32(VM_EXIT_INTR_ERROR_CODE), KVM_ISA_VMX); - switch (exit_reason) { + switch ((u16)exit_reason) { case EXIT_REASON_EXCEPTION_NMI: if (is_nmi(intr_info)) return false; @@ -9280,15 +9282,16 @@ static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu) if (!lapic_in_kernel(vcpu)) return; + if (!flexpriority_enabled && + !cpu_has_vmx_virtualize_x2apic_mode()) + return; + /* Postpone execution until vmcs01 is the current VMCS. */ if (is_guest_mode(vcpu)) { to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true; return; } - if (!cpu_need_tpr_shadow(vcpu)) - return; - sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); @@ -12461,11 +12464,10 @@ static void vmx_flush_log_dirty(struct kvm *kvm) kvm_flush_pml_buffers(kvm); } -static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) +static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) { struct vmcs12 *vmcs12; struct vcpu_vmx *vmx = to_vmx(vcpu); - gpa_t gpa; struct page *page = NULL; u64 *pml_address; @@ -12486,7 +12488,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) return 1; } - gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull; + gpa &= ~0xFFFull; page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address); if (is_error_page(page)) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5f44827e496207d8a778b91f442f21bb5f5a907b..09f47c837c2587994e971f39966a468d1c7495bc 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -806,6 +806,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) return 1; + if ((cr4 ^ old_cr4) & X86_CR4_LA57) + return 1; } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) && ((cr4 ^ old_cr4) & pdptr_bits) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, @@ -2344,7 +2346,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return kvm_mtrr_set_msr(vcpu, msr, data); case MSR_IA32_APICBASE: return kvm_set_apic_base(vcpu, msr_info); - case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: + case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: return kvm_x2apic_msr_write(vcpu, msr, data); case MSR_IA32_TSCDEADLINE: kvm_set_lapic_tscdeadline_msr(vcpu, data); @@ -2629,7 +2631,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_APICBASE: msr_info->data = kvm_get_apic_base(vcpu); break; - case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: + case APIC_BASE_MSR ... 
APIC_BASE_MSR + 0xff: return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); break; case MSR_IA32_TSCDEADLINE: @@ -3214,7 +3216,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, unsigned bank_num = mcg_cap & 0xff, bank; r = -EINVAL; - if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) + if (!bank_num || bank_num > KVM_MAX_MCE_BANKS) goto out; if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000)) goto out; @@ -6291,35 +6293,6 @@ void kvm_after_handle_nmi(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); -static void kvm_set_mmio_spte_mask(void) -{ - u64 mask; - int maxphyaddr = boot_cpu_data.x86_phys_bits; - - /* - * Set the reserved bits and the present bit of an paging-structure - * entry to generate page fault with PFER.RSV = 1. - */ - - /* - * Mask the uppermost physical address bit, which would be reserved as - * long as the supported physical address width is less than 52. - */ - mask = 1ull << 51; - - /* Set the present bit. */ - mask |= 1ull; - - /* - * If reserved bit is not supported, clear the present bit to disable - * mmio page fault. - */ - if (maxphyaddr == 52) - mask &= ~1ull; - - kvm_mmu_set_mmio_spte_mask(mask, mask); -} - #ifdef CONFIG_X86_64 static void pvclock_gtod_update_fn(struct work_struct *work) { @@ -6397,8 +6370,6 @@ int kvm_arch_init(void *opaque) if (r) goto out_free_percpu; - kvm_set_mmio_spte_mask(); - kvm_x86_ops = ops; kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, diff --git a/arch/x86/math-emu/wm_sqrt.S b/arch/x86/math-emu/wm_sqrt.S index f031c0e19356563e8ffa23333d9f2a03e717eb9f..515cdee90df7218289b2f614e82d3c192f96b88c 100644 --- a/arch/x86/math-emu/wm_sqrt.S +++ b/arch/x86/math-emu/wm_sqrt.S @@ -209,7 +209,7 @@ sqrt_stage_2_finish: #ifdef PARANOID /* It should be possible to get here only if the arg is ffff....ffff */ - cmp $0xffffffff,FPU_fsqrt_arg_1 + cmpl $0xffffffff,FPU_fsqrt_arg_1 jnz sqrt_stage_2_error #endif /* PARANOID */ diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 32bb38f6fc18285ceb3d6e780f6f3463f2f5047a..8039a951db8f5d09b56817055e52692953582168 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -112,8 +112,6 @@ __ref void *alloc_low_pages(unsigned int num) } else { pfn = pgt_buf_end; pgt_buf_end += num; - printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n", - pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1); } for (i = 0; i < num; i++) { diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c index 4d434ddb75dbe6307be3b0ca13638a94094935c6..f140b2d393191c97ccc32a6b34dd2fa612ec902d 100644 --- a/arch/x86/mm/mmio-mod.c +++ b/arch/x86/mm/mmio-mod.c @@ -385,7 +385,7 @@ static void enter_uniprocessor(void) int cpu; int err; - if (downed_cpus == NULL && + if (!cpumask_available(downed_cpus) && !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) { pr_notice("Failed to allocate mask\n"); goto out; @@ -415,7 +415,7 @@ static void leave_uniprocessor(void) int cpu; int err; - if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0) + if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0) return; pr_notice("Re-enabling CPUs...\n"); for_each_cpu(cpu, downed_cpus) { diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 33e9b4f1ce20ab3a059d34013c31b1c461f637c3..c177da94fc7932c0a4038714f94e4ea1b56e6139 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -572,6 +572,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar); 
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar); /* * Device [1022:7808] diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c index 03fc397335b7482e9302f7d7f6b85d9c0f553df5..c9fc725a1dcf4a43f2fb55c3a733b623f9794654 100644 --- a/arch/x86/platform/uv/uv_irq.c +++ b/arch/x86/platform/uv/uv_irq.c @@ -171,9 +171,10 @@ static struct irq_domain *uv_get_irq_domain(void) goto out; uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL); - irq_domain_free_fwnode(fn); if (uv_domain) uv_domain->parent = x86_vector_domain; + else + irq_domain_free_fwnode(fn); out: mutex_unlock(&uv_lock); diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index e3b18ad49889afc5ae35d2e2796aecd108a93819..41fd4c123165aaa6ad317f2b07831526188ce962 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -89,6 +89,7 @@ asmlinkage __visible void cpu_bringup_and_idle(void) { cpu_bringup(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); + prevent_tail_call_optimization(); } void xen_smp_intr_free_pv(unsigned int cpu) diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 92fb20777bb0ea1071bdb5768c24f97b662746b3..a19c61b2614222e8a5ab442d82ca26874907e074 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -711,7 +711,8 @@ c_start(struct seq_file *f, loff_t *pos) static void * c_next(struct seq_file *f, void *v, loff_t *pos) { - return NULL; + ++*pos; + return c_start(f, pos); } static void diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index dc7b470a423a6b74f02501682b2aa709245888fd..58b79e2ea569c6419d2b16c052c0e1576b3f525e 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -82,13 +82,13 @@ void __xtensa_libgcc_window_spill(void) } EXPORT_SYMBOL(__xtensa_libgcc_window_spill); -unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v) +unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v) { BUG(); } EXPORT_SYMBOL(__sync_fetch_and_and_4); -unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v) +unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v) { BUG(); } diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 7f80106624375a157136de2b27016b9339bb5a07..d3df44c3b43af6e35a7e9b9fb63c0b8bba849a72 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -315,7 +315,6 @@ bool bio_integrity_prep(struct bio *bio) if (ret == 0) { printk(KERN_ERR "could not attach integrity payload\n"); - kfree(buf); status = BLK_STS_RESOURCE; goto err_end_io; } diff --git a/block/blk-mq.c b/block/blk-mq.c index cfffcee618fb6b959c07ea14733ef05534c36a69..f4eeadcf717dbf75de73a3bf1fc4e20e4ea703ce 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2736,6 +2736,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, list_for_each_entry(q, &set->tag_list, tag_set_list) blk_mq_freeze_queue(q); + /* + * Sync with blk_mq_queue_tag_busy_iter. 
+ */ + synchronize_rcu(); set->nr_hw_queues = nr_hw_queues; blk_mq_update_queue_map(set); @@ -2746,10 +2750,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, list_for_each_entry(q, &set->tag_list, tag_set_list) blk_mq_unfreeze_queue(q); - /* - * Sync with blk_mq_queue_tag_busy_iter. - */ - synchronize_rcu(); } void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) diff --git a/build.config.common b/build.config.common index 6176b14b9b88c4043d4be7901b282ca4f6fd42a4..11bc1025114269d0d933ffd741ae6db4eccd2955 100644 --- a/build.config.common +++ b/build.config.common @@ -3,7 +3,7 @@ KERNEL_DIR=common CC=clang LD=ld.lld -CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r377782c/bin +CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r383902/bin BUILDTOOLS_PREBUILT_BIN=build/build-tools/path/linux-x86 EXTRA_CMDS='' diff --git a/build.config.cuttlefish.aarch64 b/build.config.cuttlefish.aarch64 index 0cb6019589729dbe4d8f9c78be25812bb5164728..02b48a171329719367d9edf0d9a22133a7dfe6b2 100644 --- a/build.config.cuttlefish.aarch64 +++ b/build.config.cuttlefish.aarch64 @@ -1,5 +1,6 @@ . ${ROOT_DIR}/common/build.config.common . ${ROOT_DIR}/common/build.config.aarch64 +LZ4_RAMDISK=1 DEFCONFIG=cuttlefish_defconfig POST_DEFCONFIG_CMDS="check_defconfig" diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64 index fed773ccc64aee16c7180bb37eaf564c2a68a583..3248353e35e039db1e32c8e0824a3d03e0ece9ca 100644 --- a/build.config.cuttlefish.x86_64 +++ b/build.config.cuttlefish.x86_64 @@ -1,5 +1,6 @@ . ${ROOT_DIR}/common/build.config.common . ${ROOT_DIR}/common/build.config.x86_64 +LZ4_RAMDISK=1 DEFCONFIG=x86_64_cuttlefish_defconfig POST_DEFCONFIG_CMDS="check_defconfig" diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 0679c35adf556f8b7e605d7c73d0a31a55a4acb4..3f3b57f80bdb90193a2d1ce34f9a09f0a09d624c 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -133,21 +133,15 @@ EXPORT_SYMBOL_GPL(af_alg_release); void af_alg_release_parent(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); - unsigned int nokey = ask->nokey_refcnt; - bool last = nokey && !ask->refcnt; + unsigned int nokey = atomic_read(&ask->nokey_refcnt); sk = ask->parent; ask = alg_sk(sk); - local_bh_disable(); - bh_lock_sock(sk); - ask->nokey_refcnt -= nokey; - if (!last) - last = !--ask->refcnt; - bh_unlock_sock(sk); - local_bh_enable(); + if (nokey) + atomic_dec(&ask->nokey_refcnt); - if (last) + if (atomic_dec_and_test(&ask->refcnt)) sock_put(sk); } EXPORT_SYMBOL_GPL(af_alg_release_parent); @@ -192,7 +186,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) err = -EBUSY; lock_sock(sk); - if (ask->refcnt | ask->nokey_refcnt) + if (atomic_read(&ask->refcnt)) goto unlock; swap(ask->type, type); @@ -241,7 +235,7 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, int err = -EBUSY; lock_sock(sk); - if (ask->refcnt) + if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt)) goto unlock; type = ask->type; @@ -308,12 +302,14 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern) sk2->sk_family = PF_ALG; - if (nokey || !ask->refcnt++) + if (atomic_inc_return_relaxed(&ask->refcnt) == 1) sock_hold(sk); - ask->nokey_refcnt += nokey; + if (nokey) { + atomic_inc(&ask->nokey_refcnt); + atomic_set(&alg_sk(sk2)->nokey_refcnt, 1); + } alg_sk(sk2)->parent = sk; alg_sk(sk2)->type = type; - alg_sk(sk2)->nokey_refcnt = nokey; newsock->ops = type->ops; newsock->state = SS_CONNECTED; diff --git 
a/crypto/algboss.c b/crypto/algboss.c index 960d8548171be5df969ebb8a03293ab6b672e7ee..9d253e1016b1d1d02457312328329ba03b33f07d 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -194,8 +194,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) if (IS_ERR(thread)) goto err_put_larval; - wait_for_completion_interruptible(&larval->completion); - return NOTIFY_STOP; err_put_larval: diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index f138af18b5000245330d6dc43cc036b005d1fc19..379e83c8aa522c1a34f5fcc673e96592d950bc89 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -389,7 +389,7 @@ static int aead_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -401,11 +401,8 @@ static int aead_check_key(struct socket *sock) if (!tfm->has_key) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 39cebd3256bf8df50858cfc7f499fe6cfe268256..d987e2c90d74e30d3586336ee2be55a910801eee 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -309,7 +309,7 @@ static int hash_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -321,11 +321,8 @@ static int hash_check_key(struct socket *sock) if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 90bc4e0f0785937fedb1091646ccae6e59f6ee97..d9ec5dca86729161893196c61836bda22619ad39 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -85,14 +85,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, return PTR_ERR(areq); /* convert iovecs of output buffers into RX SGL */ - err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len); + err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len); if (err) goto free; - /* Process only as much RX buffers for which we have TX data */ - if (len > ctx->used) - len = ctx->used; - /* * If more buffers are to be expected to be processed, process only * full block size buffers. 
@@ -227,7 +223,7 @@ static int skcipher_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -239,11 +235,8 @@ static int skcipher_check_key(struct socket *sock) if (!tfm->has_key) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; diff --git a/crypto/lrw.c b/crypto/lrw.c index 886f91f2426c6483bc37482d42b7fa9da44cb2bb..1b73fec817cf7b206f9122c50ceb1b8695d468b1 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -531,7 +531,7 @@ static void exit_tfm(struct crypto_skcipher *tfm) crypto_free_skcipher(ctx->child); } -static void free(struct skcipher_instance *inst) +static void free_inst(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); @@ -642,7 +642,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.encrypt = encrypt; inst->alg.decrypt = decrypt; - inst->free = free; + inst->free = free_inst; err = skcipher_register_instance(tmpl, inst); if (err) diff --git a/crypto/xts.c b/crypto/xts.c index e31828ed00466cc08e8ee2e73bacb2d99c1e0a34..f5fba941d6f6c0d8df348f632fbc4e8eeb6eb63a 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -469,7 +469,7 @@ static void exit_tfm(struct crypto_skcipher *tfm) crypto_free_cipher(ctx->tweak); } -static void free(struct skcipher_instance *inst) +static void free_inst(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); @@ -580,7 +580,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.encrypt = encrypt; inst->alg.decrypt = decrypt; - inst->free = free; + inst->free = free_inst; err = skcipher_register_instance(tmpl, inst); if (err) diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 7bf1948b1223ba81b9f41d95b6cad670c79a69ca..732549ee1fe3dc0f2f81fa94c2619bef66390c65 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -800,6 +800,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) "acpi_cppc"); if (ret) { per_cpu(cpc_desc_ptr, pr->id) = NULL; + kobject_put(&cpc_ptr->kobj); goto out_free; } diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index a75f4d9a2729d23c71d15940ae10d03ca48326e4..6abd647c6750c9aef5a510afed51c98ad6cb472f 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -172,7 +172,7 @@ int acpi_device_set_power(struct acpi_device *device, int state) * possibly drop references to the power resources in use. */ state = ACPI_STATE_D3_HOT; - /* If _PR3 is not available, use D3hot as the target state. */ + /* If D3cold is not supported, use D3hot as the target state. 
*/ if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid) target_state = state; } else if (!device->power.states[state].flags.valid) { diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c index 46f060356a22fecf0c81d110cf26f4a60853264b..339e6d3dba7c3b84d9bdba0b4c1789d3909c3a9a 100644 --- a/drivers/acpi/evged.c +++ b/drivers/acpi/evged.c @@ -82,6 +82,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, struct resource r; struct acpi_resource_irq *p = &ares->data.irq; struct acpi_resource_extended_irq *pext = &ares->data.extended_irq; + char ev_name[5]; + u8 trigger; if (ares->type == ACPI_RESOURCE_TYPE_END_TAG) return AE_OK; @@ -90,14 +92,28 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, dev_err(dev, "unable to parse IRQ resource\n"); return AE_ERROR; } - if (ares->type == ACPI_RESOURCE_TYPE_IRQ) + if (ares->type == ACPI_RESOURCE_TYPE_IRQ) { gsi = p->interrupts[0]; - else + trigger = p->triggering; + } else { gsi = pext->interrupts[0]; + trigger = pext->triggering; + } irq = r.start; - if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) { + switch (gsi) { + case 0 ... 255: + sprintf(ev_name, "_%c%02hhX", + trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi); + + if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) + break; + /* fall through */ + default: + if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle))) + break; + dev_err(dev, "cannot locate _EVT method\n"); return AE_ERROR; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 2eddbb1fae6a0be9418002ff147968a24195ffb4..8bc1a778b3a4450dff4ef44e6e8b86f85068521c 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -920,12 +920,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state) if (buffer.length && package && package->type == ACPI_TYPE_PACKAGE - && package->package.count) { - int err = acpi_extract_power_resources(package, 0, - &ps->resources); - if (!err) - device->power.flags.power_resources = 1; - } + && package->package.count) + acpi_extract_power_resources(package, 0, &ps->resources); + ACPI_FREE(buffer.pointer); } @@ -972,14 +969,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) acpi_bus_init_power_state(device, i); INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources); - if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) - device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; - /* Set defaults for D0 and D3hot states (always valid) */ + /* Set the defaults for D0 and D3hot (always supported). */ device->power.states[ACPI_STATE_D0].flags.valid = 1; device->power.states[ACPI_STATE_D0].power = 100; device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1; + /* + * Use power resources only if the D0 list of them is populated, because + * some platforms may provide _PR3 only to indicate D3cold support and + * in those cases the power resources list returned by it may be bogus. + */ + if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) { + device->power.flags.power_resources = 1; + /* + * D3cold is supported if the D3hot list of power resources is + * not empty. 
+ */ + if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) + device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; + } + if (acpi_bus_init_power(device)) device->flags.power_manageable = 0; } diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 9e728a1494f64131195808bdc5068d82e3f2bb6b..1828b335c28aa0574c9e831e47cb5b62fd4a9c17 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -944,13 +944,13 @@ static void __exit interrupt_stats_exit(void) } static ssize_t -acpi_show_profile(struct device *dev, struct device_attribute *attr, +acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile); } -static const struct device_attribute pm_profile_attr = +static const struct kobj_attribute pm_profile_attr = __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL); static ssize_t hotplug_enabled_show(struct kobject *kobj, @@ -999,8 +999,10 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug, error = kobject_init_and_add(&hotplug->kobj, &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name); - if (error) + if (error) { + kobject_put(&hotplug->kobj); goto err_out; + } kobject_uevent(&hotplug->kobj, KOBJ_ADD); return; diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 214c4e2e8ade1db216621f9921e389ca50215df2..ab1da5e6e7e3e8bc2931a0153f6de6c6d46eb9fb 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -328,6 +328,25 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"), }, }, + { + .callback = video_detect_force_native, + .ident = "Acer Aspire 5738z", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), + DMI_MATCH(DMI_BOARD_NAME, "JV50"), + }, + }, + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=207835 */ + .callback = video_detect_force_native, + .ident = "Acer TravelMate 5735Z", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5735Z"), + DMI_MATCH(DMI_BOARD_NAME, "BA51_MV"), + }, + }, /* * Desktops which falsely report a backlight and which our heuristics diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 7bd038edc1f77728e1cb800e706b5c1281ef9d10..b256a56a70d50fd2c4fcd6e7bb7f6b801b0a2c17 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -478,7 +478,8 @@ struct binder_priority { * @requested_threads_started: number binder threads started * (protected by @inner_lock) * @tmp_ref: temporary reference to indicate proc is in use - * (protected by @inner_lock) + * (atomic since @proc->inner_lock cannot + * always be acquired) * @default_priority: default scheduler priority * (invariant after initialized) * @debugfs_entry: debugfs node @@ -513,7 +514,7 @@ struct binder_proc { int max_threads; int requested_threads; int requested_threads_started; - int tmp_ref; + atomic_t tmp_ref; struct binder_priority default_priority; struct dentry *debugfs_entry; struct binder_alloc alloc; @@ -2024,9 +2025,9 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread) static void binder_proc_dec_tmpref(struct binder_proc *proc) { binder_inner_proc_lock(proc); - proc->tmp_ref--; + atomic_dec(&proc->tmp_ref); if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) && - !proc->tmp_ref) { + !atomic_read(&proc->tmp_ref)) { binder_inner_proc_unlock(proc); binder_free_proc(proc); return; @@ -2088,18 +2089,26 @@ static struct binder_thread 
*binder_get_txn_from_and_acq_inner( static void binder_free_transaction(struct binder_transaction *t) { - struct binder_proc *target_proc = t->to_proc; + struct binder_proc *target_proc; + spin_lock(&t->lock); + target_proc = t->to_proc; if (target_proc) { + atomic_inc(&target_proc->tmp_ref); + spin_unlock(&t->lock); + binder_inner_proc_lock(target_proc); if (t->buffer) t->buffer->transaction = NULL; binder_inner_proc_unlock(target_proc); + binder_proc_dec_tmpref(target_proc); + } else { + /* + * If the transaction has no target_proc, then + * t->buffer->transaction * has already been cleared. + */ + spin_unlock(&t->lock); } - /* - * If the transaction has no target_proc, then - * t->buffer->transaction has already been cleared. - */ kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } @@ -2931,7 +2940,7 @@ static struct binder_node *binder_get_node_refs_for_txn( target_node = node; binder_inc_node_nilocked(node, 1, 0, NULL); binder_inc_node_tmpref_ilocked(node); - node->proc->tmp_ref++; + atomic_inc(&node->proc->tmp_ref); *procp = node->proc; } else *error = BR_DEAD_REPLY; @@ -3028,7 +3037,7 @@ static void binder_transaction(struct binder_proc *proc, goto err_dead_binder; } target_proc = target_thread->proc; - target_proc->tmp_ref++; + atomic_inc(&target_proc->tmp_ref); binder_inner_proc_unlock(target_thread->proc); } else { if (tr->target.handle) { @@ -4641,8 +4650,15 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc) static void binder_free_proc(struct binder_proc *proc) { + struct binder_device *device; + BUG_ON(!list_empty(&proc->todo)); BUG_ON(!list_empty(&proc->delivered_death)); + device = container_of(proc->context, struct binder_device, context); + if (refcount_dec_and_test(&device->ref)) { + kfree(proc->context->name); + kfree(device); + } binder_alloc_deferred_release(&proc->alloc); put_task_struct(proc->tsk); binder_stats_deleted(BINDER_STAT_PROC); @@ -4673,7 +4689,7 @@ static int binder_thread_release(struct binder_proc *proc, * The corresponding dec is when we actually * free the thread in binder_free_thread() */ - proc->tmp_ref++; + atomic_inc(&proc->tmp_ref); /* * take a ref on this thread to ensure it * survives while we are releasing it @@ -5171,6 +5187,7 @@ static int binder_open(struct inode *nodp, struct file *filp) return -ENOMEM; spin_lock_init(&proc->inner_lock); spin_lock_init(&proc->outer_lock); + atomic_set(&proc->tmp_ref, 0); get_task_struct(current->group_leader); proc->tsk = current->group_leader; mutex_init(&proc->files_lock); @@ -5370,7 +5387,6 @@ static int binder_node_release(struct binder_node *node, int refs) static void binder_deferred_release(struct binder_proc *proc) { struct binder_context *context = proc->context; - struct binder_device *device; struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, active_transactions; @@ -5389,18 +5405,12 @@ static void binder_deferred_release(struct binder_proc *proc) context->binder_context_mgr_node = NULL; } mutex_unlock(&context->context_mgr_node_lock); - device = container_of(proc->context, struct binder_device, context); - if (refcount_dec_and_test(&device->ref)) { - kfree(context->name); - kfree(device); - } - proc->context = NULL; binder_inner_proc_lock(proc); /* * Make sure proc stays alive after we * remove all the threads */ - proc->tmp_ref++; + atomic_inc(&proc->tmp_ref); proc->is_dead = true; threads = 0; diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 
880affe45b07921297dc109cf99240b1fa6ddac2..1b5a131278bc2ea741fed679936a0bd16a18236e 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -937,7 +937,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, trace_binder_unmap_user_end(alloc, index); } up_read(&mm->mmap_sem); - mmput(mm); + mmput_async(mm); trace_binder_unmap_kernel_start(alloc, index); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 33eb5e342a7a9293654859c33fcad0fc9f218a5f..a3a65f5490c029b39218970cca07cc8a813de12b 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -57,7 +57,6 @@ #include #include #include -#include #include #include #include @@ -6536,7 +6535,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) /* perform each probe asynchronously */ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; - async_schedule(async_port_probe, ap); + ap->cookie = async_schedule(async_port_probe, ap); } return 0; @@ -6676,11 +6675,11 @@ void ata_host_detach(struct ata_host *host) { int i; - /* Ensure ata_port probe has completed */ - async_synchronize_full(); - - for (i = 0; i < host->n_ports; i++) + for (i = 0; i < host->n_ports; i++) { + /* Ensure ata_port probe has completed */ + async_synchronize_cookie(host->ports[i]->cookie + 1); ata_port_detach(host->ports[i]); + } /* the host is dead now, dissociate ACPI */ ata_acpi_dissociate(host); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 2f81d65342709c8b75b5cbf37ced79d8f1bf6572..bc2c27f0493fc2abfcea48e07e3e3a2c40635b8c 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3996,12 +3996,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) { struct scsi_cmnd *scmd = qc->scsicmd; const u8 *cdb = scmd->cmnd; - const u8 *p; u8 pg, spg; unsigned six_byte, pg_len, hdr_len, bd_len; int len; u16 fp = (u16)-1; u8 bp = 0xff; + u8 buffer[64]; + const u8 *p = buffer; VPRINTK("ENTER\n"); @@ -4035,12 +4036,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) goto invalid_param_len; - p = page_address(sg_page(scsi_sglist(scmd))); - /* Move past header and block descriptors. 
*/ if (len < hdr_len) goto invalid_param_len; + if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd), + buffer, sizeof(buffer))) + goto invalid_param_len; + if (six_byte) bd_len = p[3]; else diff --git a/drivers/base/component.c b/drivers/base/component.c index 08da6160e94dd3521236dfdf567ac5a1ba61edc3..55f0856bd9b5e1a317c84c53ecd760a2ee8e4357 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -162,7 +162,8 @@ static int try_to_bring_up_master(struct master *master, ret = master->ops->bind(master->dev); if (ret < 0) { devres_release_group(master->dev, NULL); - dev_info(master->dev, "master bind failed: %d\n", ret); + if (ret != -EPROBE_DEFER) + dev_info(master->dev, "master bind failed: %d\n", ret); return ret; } @@ -431,8 +432,9 @@ static int component_bind(struct component *component, struct master *master, devres_release_group(component->dev, NULL); devres_release_group(master->dev, NULL); - dev_err(master->dev, "failed to bind %s (ops %ps): %d\n", - dev_name(component->dev), component->ops, ret); + if (ret != -EPROBE_DEFER) + dev_err(master->dev, "failed to bind %s (ops %ps): %d\n", + dev_name(component->dev), component->ops, ret); } return ret; diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 040d550c87dc62a766bfc48b7ede4a6e21ab9d8a..11027b2146bcafc8b0a35bbbc45acfeb8ae10a1c 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -644,6 +644,12 @@ ssize_t __weak cpu_show_itlb_multihit(struct device *dev, return sprintf(buf, "Not affected\n"); } +ssize_t __weak cpu_show_srbds(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -652,6 +658,7 @@ static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL); static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); +static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -662,6 +669,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_mds.attr, &dev_attr_tsx_async_abort.attr, &dev_attr_itlb_multihit.attr, + &dev_attr_srbds.attr, NULL }; diff --git a/drivers/base/platform.c b/drivers/base/platform.c index bcb6519fe2113fd891e82e2b22feb3f0fa329d54..0ee3cab88f70f97116f945e6fa25ed12b7de0987 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -702,6 +702,8 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv, /* temporary section violation during probe() */ drv->probe = probe; retval = code = __platform_driver_register(drv, module); + if (retval) + return retval; /* * Fixup that section violation, being paranoid about code scanning diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 013d0a2b3ba0ae7deea7c95ba2704bdfbdae844d..4e0cc40ad9ceb71d46440f67d4e616b8c50e51d3 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1242,7 +1242,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data) /* If the user didn't specify a name match any */ if (data) - return (*r)->name == data; + return !strcmp((*r)->name, data); else return 1; } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 
39b119af65f72c57e90e30641dc9e8cb6a5c0da3..2bbd6bed1535f8f251a090a356d29fa9adcd4c16 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1112,7 +1112,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) { sync_blockdev(lo->lo_device); - kill_bdev(lo->lo_device); + invalidate_bdev(lo->lo_device); } /* I/O need to be drained during transfer transition */ @@ -1384,11 +1384,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) return 0; sync_blockdev(lo->lo_device); - kill_bdev(lo->lo_device); + invalidate_bdev(lo->lo_device); blk_mq_freeze_queue(lo->lo_queue); - /* kill_bdev should have truncated all the pages */ + /* invalidate_bdev should have truncated all the pages */ if (lo->lo_device->bd_inode->i_mapping->nrpages) { err = -EAGAIN; pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 8f56e6b2f114f17fb49d5824f22987097f7e0524..f22fad977c9138ec54c00fbf3201e4b724c0532c 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -960,25 +960,26 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, test_bit(NBD_BOUND, &config->runtime_flags))) { dev_err(disk_to_dev(nbd->disk), "Device being setup by another task"); - sockfd_put(sock); - return -EBUSY; + err = -EBUSY; + goto put_socket; + } + + nsock = kzalloc(sizeof(*nsock), GFP_KERNEL); + if (!nsock) { + err = -ENOMEM; + goto put_socket; } socks = krealloc(config->socks, (config->num_connections + 1) * sizeof(struct nbd_sock *), GFP_KERNEL); if (!socks) { - sockfd_put(sock); - return -ENOMEM; + kfree(nsock); + err = -ENOMEM; + goto put_socket; } config->socks = socks; - nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL); - if (!nsock) { - sockfd_put(sock); - return -ENOMEM; - } - nsock->fallback_index = -1; nsock->dead = false; mutex_init(&nsock->tx_lock); @@ -990,6 +991,10 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, atomic_inc(&config->live_connections); return 0; + +put_socket: + sockfd_put(sock); + return err; } static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 075662f2cf46631c10fde3f8ca9c95271145ee61..d20f66d57804015831b165cc5a4ef6775021a57a 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -468,7 +468,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH); blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9); - blk_queue_segment_boundary(queue, -1UL); blk_queue_dma_alignment(queue, dev->blk_size-1); blk_queue_logical_block_size(queue, dev->blk_size); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 763308990dd8119578267ec6160456d86b250033..d7e8f79530b9a0b394b7c772c4d7b4054d2d0ba7 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -38,6 +38,15 @@ struct virtio_blk_vq { } ____cacheline_aligned_in_smp; struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; struct virtio_device *vdev; /* The disk structure for the kernel. 
*/ @@ -56,6 +65,12 @@ struct virtio_blk { struct request *req_pending; bool work_pending; #endif + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; /* What host tells us, plus 2 for header & tailer. */ unsigned int sg_elems; @@ -410,10 +425,55 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str) return err; } +static void virtblk_get(struct virtio_blk *vblk) +{ + refcount_inc(&vblk->refs); +} + +static void virtblk_put(struct virtio_blk *vblk) +{ + if (refcount_dec_and_test(&vblk->refs)) { + ida_simple_remove(&vd_index_ida, vblk->index); + mutex_destroy(&vblk->vdev_mutex); + kfree(vblk); + } +} + +static int virtblk_open(struct block_device *bd, fmode_t mode) +{ + struct virtio_blk *vblk = bd->bd_disk->private_data; + int ret = 0; + + mutex_lock(&vblk->vdev_mutex); + + if (vblk->vdev) + virtblk_get(vblk); + else + ret = -ENXIO; + + mutex_unlock(&vblk->vdev_mutex); + return ret; +} + +static void virtblk_release(struct gendisk *disk, fmode_t mode) +{ + struct virtio_blk *vblk = disk->private_data; + + virtblk_put(vblk); +} + /* We provide getgeo only to please some old bootloader/partitioning tools */ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) { struct virtio_blk *vblk = bd->bd_disk->private_data; + int ret = 0; + + mutex_lock(&vblk->vdev_mutex); + + if (!vblk->vdev) { + ret = -ENXIO; + goto out; + } /* see if the host passed in geometry config */ if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { @@ -429,12 +489,16 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) geo->sectors = 1 << 5; geo->cylinders = get_capacity(bd->bd_disk) >> 11; } - return 0; +out: + mutex_unlock(&vblk->vdev_mutex); + return ret; } static const struct block_device_operations virtblk_fops = { .ioctl = virtblk_ioctl, .owner = THIS_MODULE, + .open = virtblk_open, + .release = virtblk_release, .getgeo = virtblk_getgeo, }; @@ -794,6 +858,10 @@ static int virtblk_probe(struct virtio_device *vdev) goto out_free_index; } + /* This reference is dropped in virtblk_remove(). */ + refcount_set(&vblk->refs, 1); + mutex_init(&vblk->vdev_mutex); + vblk->vdev = vdev; vblk->sg_elems = sg_elems; @@ -957,6 +1025,7 @@ static int virtblk_probe(struct virtio_device *vdev) put_disk(vblk->disk); out_free_vq: vdev->config->del_vqs(vdev); + kfree(vblk->vqs); out_free_vblk: kfree(vblk); out_free_index: @@ -968,8 +1037,6 @@ static int virtblk_probe(struct virtio_device *vdev) static void virtblk_remove(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; - int index = vblk->index; - int refc; /* Make sure no work handler is accessing the device. */ flush_work(&vblk->config_work); @@ -979,18 +1046,21 @@ static void virtblk_remove(struct virtio_device *vdev) blk_mq_free_tag_set(&vblk->tag_set); + mutex_lock(&vblk->vdev_mutex); + /* Stop all the virtqueues. */ vdev->config->reset(vdev); - refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref); + /* Virtqueues are stopped, nothing can use vblk->vdev anymore. 
*/ + vblk->vdev = NULL; + put_disk(vblk->disk); vdev->config->del_vqs(vdev); kfree(vblk->vqs); - kfree(vblk); - /* Only free device id if we don't have any users */ - if (refc == 1) - ida_simple_remove(&vd_index_ida, index); + mutex_unlock(&vblk->vdev_mutex); + + virtblk_put(vblk); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 16e3356afdfd2fe9243d70b8c632903238794727..5bdfdaa54e2e1c72fb6d190ef1c7abd01f495d9d 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -2146,7 +2146,8 @@ static ssize_t hot_add_show(struct class *class, return ret; return scnprintf(buf, PAGE_SIZE, "%d\n", ret); } -static CLASS_ATTR_RO(hot_add); +static struct class_attribute class_attr_hot_add = + __ATTR(hot_add, 0400, hot_add_show, NULL); static ssize_t hot_remove_store(struct class *class, struct class_attribute *attr, diff --git a/drivers/bus/mhi/core/mhi_boot.c b/drivers/bus/mhi/core/mhi_boot.c index f8bdded6d48f756d30193ca7165962c9978f5596..ed6bf499e5d5b7ba5d5014c7720bf0d095e05b1e 100644 --- a/drivers/bus/mhi/core/mhi_boot.c +++ b/drivers/bus/mhi/core/mhi_boot.c @@ -609,12 +609,9 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size); mhi_free_coherent(mhi_cntrl, size, buf, dma_addr); - if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL) - release_firmware(firmware); - /* error or in edl, we're done */ if (ret || mhi_cntrl->ee == MHI_EE_EDL) - return; + goto release_fw; write_lock_irq(&mhi_cntrl->pm_lock); mhi_cntrl->dev_state = MHI_STATE_RESET; @@ -629,7 +626,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) firmware->size); if (ret) { MHI_CNTRL_ERR("Error alloc size:%zu\n", firmware->size); - goto error_alloc_fw_table; + goto release_fw; } MHI_CNTRL_LOG("Copying firmware image into vector table\n"); @@ -648,7 +645,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) TO_MHI_EXEC_STR(mhi_cntrl->ee), ret); if (!mhi_cntrl->fbc_download) - return; + goto release_fw; if (ret) { MHI_CNTRL_ERR("Did not transition to READY state\n"); @@ -682,7 +679,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); mhi_cntrl->fbc_image = NULL; -error_alloc_fw_table: +release_fw: release_firmware(firmware); } diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index dde7caac7f9ff01f94201748fe7615d0c6e37949..7516ba981b63579fb8a252dabea4d6ecde52d9ea 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -846,6 +846,7 @@ void intel_gtt_insert_page(dma_addr_t addr, unsigned int flags) { intel_private.driver->write_entry(addr, pg, flags); + readl(intel_private.gtt + pg); if (intel_private.driver->chipset_flush) intel_private.driver->chipset_flush(); } @@ -871,7 +872,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, j++; } } - wmb(); + readl(intel_private.gtt + j - 1); if (intel_private.driver->chipset_flush) intel_private.driver->chipset_flush(); } @@ -1105,6 +1106,7 @@ static void i9xx_cleanup(void) static void i9xx_chipset_flush(void) { + wmb(); if (intel_private.i9xx_flush_page) writel(1, intel_private.i9xx_flush_page); } diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c index 593bc1b6d5127eaffeffa7edec1bd45919171961..ebc53fcab73c0b0e23b65f8b2c6c841b6292e3ab 100644 --- a/drivers/char/diag/diag_mux.c +++ b/drivers/char/diag/diag_mux.c @@ -125,6 +125,8 @@ int diag_pcie_register_ops(int proc, int ctx, struct 
diag_mux_ops *ops) return 0; pcie_logger.ops[proc] = ops; + DIAG_LOG(DIAG_DEBUG_MUX, + "diag: registering pcie for proc: %d\n", proc); err = diag_pcie_register(proc, ctx, ops); if (err) { pr_err("diag: MUX: unable to register pcie operations for proc: %d, err: %d\n", diff --git a/drivers/char/diag/diag_mux.h b/drivers/char/diag/diag_mux.h index 5fe41aae085036d2aebf61a9bb9d8f1a3f58eab9..8674deb34fbb4533ff3ec52fa905d04265ee042a 100644 --- a/drivers/char/diag/diag_mux.h +++ b/drivers/char/diag/diag_mux.h @@ -78,4 +78,5 @@ int diag_mux_close_all(void); int diag_pcie_register_ops(int proc, int ctx, struct diag_mux_ops *ops); int diag_usb_register_ops(int proc, int ctx, struct diag_mux_ops *ops); int diag_mux_switch_logging(int proc, int *new_mode, int *peripheral_mask); +void diag_notify_md_client(uint8_t proc, uint8_t peripheral, int data); #endif diff --git a/drivers/char/diag/diag_pcie.c b/drivers/char/diag/diag_pcie.c index f8c424005bc90af868cd122efe4052e5adf258dd..56dca251edb6cfbb3b5b366f3c223fbf39b8b6df 100644 --- a/drivers/char/diag/diag_pcie.c +++ b/drivers/char/diag/diag_pcie.c @@ -416,31 +416,41 @@ void diag_pcie_client_cb(struct mhi_dev_client_cb_data *cb_data) { struct diag_pcie_info *pcie_info = NULL; - if (!cb_data) + if (!cb_data) { + pr_err("diag: %s: Invalid cb_data\n", __func__); return; - + } pcie_info = cb_data->user_data; - if (!pcie_info) + if (!pcie_info) { + pr_err("diag: %s: Invalid pcie_info\n", __func__); return; - + } switch (cb_data->ctrl_info) { case MHI_STATE_CONNECTED: if (cb_data->channel == pcie_info->out_chan) { DIAG_LOG(DIAG_DEBUG_MUX, - " Received connect event from MHI for %d", + "diag: Received connect event from MHI for %d\n", + pcie_info->out_chan); + if (atomic_read(&pcie_info->enabled)) { + DIAG_LOG(DIAG_DEBUG_MUX, + "diag: Channel %d is already enabled\n", pcie_info->out_chan); - if (atomic_read(&pcie_info->enabled)) return; + } queue_work(pcie_info->wq, &pcie_info->open_work); } break; case MHI_STATE_DISCONNECTED: if (cb_data->channel == pcie_info->out_chan) { DIAG_LOG(DIAG_DEBUG_MUX, - " Received disconnect event from MHI for %d", + "diag: Received disconnect event from MHI for %d\n", + pcie_info->out_chan); + if (!atomic_read(&pcie_info->enabled)) { + DIAG_LOG(DIAG_DEBUG_MUX, + "diag: Channel %d is already disabled\n", pcie_info->out_chan); - if (!atomic_read(&pcie_info->enabled)) return; + } queue_work(pcie_info->wq, &pcie_info->close_work); } break; @@ -490,12 +500,9 @@ static void diag_pcie_connect(struct diag_pcie_info *ch) queue_work(ch->wq, &(ch->read_work)); } -void diag_pcie_open_work_fn(struct work_struct *work) +static void diag_pcie_open_channels(struct diag_pcie_info *pcie_info) { int rc = 0; - struct diag_pcie_info *pcie_info = container_of(work, - struct diag_pcie_info, - open_work); if (!pcie_info || atomic_read(&pcie_info->enabled)) return; @@ -540,6 +547,15 @@ void diag_pcie_open_work_fn(struct work_struct *work) mutex_unlock(&pcie_info->out_chan_lock); } +void diag_pcie_open_work_fn(struct work_struct *work) +{ + struct diag_pcie_info *pcie_info = container_of(work, + struct diag_pcie_info, + open_work); + + diag_pcie_open_channels(pcie_info); +} + /* * This function performs pcie connect operations wrt Diag synchronously. It * doesn't translate to actual pcie connect. 
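The refactor above pulls the body of the open work handler out into diag_pcie_open_channels(), so diag_pcie_register() further below can also call it directly (for example when channel registration reports -EEXIST) rather than only via the workqueue. A minimal sketch of that pattern, with hypothetical names standing in for the driver's own types:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_chan {
	struct work_struct open_work;
	/* ... channel state ... */
};

/* The real work lives in a helper that anyone may call in process context. */
static void my_chan_open(struct my_chan *ch)
{
	/* bring up the channel, allocate buffers, etc. */
}

/* The work handler is now only a thin wrapper around that helper. */
static void my_chan_open_work_fn(struct work_struct *work)
{
	struct my_chan *ch = container_of(work, struct my_chan, open_work);

	my_chan_open(ch);
}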
This is used when Diag switches @@ -679,6 +695,8 @@ int diag_pcie_register(int id, int ctxt, struct diag_mux_ops *ops) return -EIO; } + pr_info("diag: Pcie registration initiated for id: %d\n", id); + ch = &diag_pcie[id]; ch->ops = ops; ch->ctxt = ctxt; @@ -692,17 +710,27 @@ int diag_pcie_register(int id, int ctxt, struct diag_mux_ops *ops) strlcpy(wq_name, "DIAG_PCIE_", sizeof(wq_name)); strlcat(wq_name, ch->name, sizeof(wq_name)); ch->wq = create_singlethread_workqueue(wq_name); - if (!ch->wq) + if (!ch->wq) { + pr_err("diag: %s: failed creating workqueue for wq_name: %s\n", + __func__, wq_name); return -ENOMEM; + } + DIAG_LOG(DIAG_DEBUG_MUX, "diag: created wq: %s\n", wq_name); diagmem_init(driver, ch->mempool); mutex_init(&ch->in_chan_lock); mutex_init(&ch->out_chan_lock); rc = diag_register_pcie_channels(ch); - if (rc < 0) { + if (rc == -EEXIST) { + pr_err("diag: Handled -EEXIST error\n"); + diag_pcie_open_channels(ch); + } else if (rc < 0 && rc != -EEXIST) { if (ch->wq) destroy_workqueue(ch->wq); kfree(ch->in_chan_attr.read_buffer); + pr_err("diag: %s: failed registering pcie channels\n", + __func__); return rc; } + pr_info("diag: pcie channel with id: %d registered successfully\n", id); return 0; } diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index 9b953e6b7b5c1b9e0558dd1bc8cb83f8931ba9b6..9b6c95eb9859901189328dc750b74b96d1d73e4d 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -1968,7 +1968,7 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param) DIAG_LOG(DIAG_DEBUG_USERSPACE, "not switching modes c: %d n: %d\n", curr_mode, new_mode); - return 0; + continue; } diag_ws_reset(DIAG_WS_MUX); diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c index f8f4425df428be2eafe63b3e9f95d9280afa64da..0c5ac5475344963719bfad58ad72495c7de3a1ac 100644 --- a/drivers/char/diag/diagfwd_bridge.c +++ b/drivers/char/diag/diagfwd_bridge.c @@ -168,10 +168,12 @@ int diag_remote_dev_open(int id) if (id < 0 || id >= NUM_REMOTE_DEV) return -EINVAL; bridge_info[id].inited = 1; - if (bridge_info[id].type == DIAG_DATA_TYPE) + if (bridge_info[id].type == DIAG_DATA_TYPE) { + diag_notify_md_client(BRIDGE_TO_MUX(id), 0, DIAG_STATUS_OPEN); return diag_mux_queue_read(BRIDGE_TO_MUX(id)); - else if (bridge_info[id].type == DIAG_DCI_TYPE) + } else if (bridge_info[id].type == DIAG_DCI_TYPE) { return diag_dci_send_handshake_pkt(bridge_info[id].id); + } return 0; } @@ -184,6 +186,9 @@ void diag_remote_dev_close(int id) diag_mux_close_device(BRIDGE_TO_MUX(id)); + if (bridge_info[id].type == DIAG_DATA_TYPE) + diag_notify_md_client(BRIDGE_TO_MUX(id), 0, DIAG_STATUS_CLOSED); + } int diag_remote_dev_read_done(int id, unsigned char *buf, int len) diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c index 15801eb2d8aa60746ad66e6e96774b6b47dd222f..3d74b1e962575e7971561cfcc5d5d38fcef9911b 100644 --- a/drivers/char/diag/diagfwd_cntl.c +++ b/drivers/char/diag/diagfwd_cntl.c @@ -27,11 +27,10 @@ #include "diag_mux.h" #define FEATURE_SUPPORTED(x) ((feature_mask << (i * 8)) & (1 << x)) - +#define DIAG_GET_MD_DEVICE_SIG_MASK(proc) (0x100000 * (1 << proc)) /* tracks which peripheral is undergoing SSR */ static uint16_t reg_dirty[NUM_PERIPHERALS]; static uint8_t diag_id = DIAG_ID_APPS; -static void diag_notify_md_client(uint8_t peripheral, int data); static void diag_mask_update_work_fn(struct work_struct *work) { @@ -50,7 +49,9 @@ void diag_cntl_channel_open(struct diagfwd_info *p_info) 
return; driver->mask_update |= PERIPHERAL_MASK(p_info->peripheral); queue_work(driver->cntl_wq, &driver->mask_update_work); - diag_notify_md_client(p_info->peripheral, DIAG_STATUS_OPEN); + diag_notify_md_client(DIAG_LOCAL_PROC, p_info->peripheral, + DIAG_STATUS_OPEN); + } void diag_cntl_channel_close(struct diagfwd_info *p_info) @@ -74,7 +75,7 @@ void diag_cntl_channel_close(struct diagfwd_info *p_info) driver->stm_state[peripheral] = DISABLE_STM; driver->stm_state_requested[peripheral] = DISABLE_STM; reg_dirty[peripheral] = 0; - diag_notify_md_client(peripheral, DIAG_STATUS_CLOSED); + diag_notify_md_client(DIAG_LOCAL_PROC, peripheral, DIAG_STATUS_CLOSED); } static void diag_stm_update_work_fn(struct work_struct *work) @@ -105,9 +106,9 @@ static void diag_stm_update_work_fn(struct work_struct *work) } } -void diag_notify_md_client(uint8_t peripheral, int data) +void diag_notify_md_client(uint8_t proc, uint8_t peripheral, int data) { - int stat = 0, proc = DIAG_LOCAL_PROC; + int stat = 0; struct siginfo info; struct pid *pid_struct; struct task_struct *result; @@ -121,7 +122,10 @@ void diag_notify_md_client(uint8_t peripheral, int data) mutex_lock(&driver->md_session_lock); memset(&info, 0, sizeof(struct siginfo)); info.si_code = SI_QUEUE; - info.si_int = (PERIPHERAL_MASK(peripheral) | data); + info.si_int = (DIAG_GET_MD_DEVICE_SIG_MASK(proc) | data); + if (proc == DIAG_LOCAL_PROC) + info.si_int = info.si_int | + (PERIPHERAL_MASK(peripheral) | data); info.si_signo = SIGCONT; if (!driver->md_session_map[proc][peripheral] || @@ -179,7 +183,7 @@ static void process_pd_status(uint8_t *buf, uint32_t len, pd_msg = (struct diag_ctrl_msg_pd_status *)buf; pd = pd_msg->pd_id; status = (pd_msg->status == 0) ? DIAG_STATUS_OPEN : DIAG_STATUS_CLOSED; - diag_notify_md_client(peripheral, status); + diag_notify_md_client(DIAG_LOCAL_PROC, peripheral, status); } static void enable_stm_feature(uint8_t peripheral) diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 0146bc3252c5abcd3853190b38609c8d37b5f631..cf87bfe971e6bfbff66685526e92873a60119902 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -1731,7 +1731,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) out: if (rv) { - addr_info->client = NULL; + if (addr_info) + addr_info->client = NULL; + dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv); kfree(ssif_info); } diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 9b1116501f209fe19220d81d0a04080e47a51266..c028ffd953326bd8b9fa31d5452599a6e9e23894 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -897,7 +897,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, return 0; out_err: - if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL)) + if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, false); tpm_tis_remove(chip); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 5200772ab0bd75bee221b86d67b6421c507dc867..6a57237e46db7973dc0b141376956060ece55e6b 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -2158,6 +2158,7 @@ static struct virtio_device_id id_table[] = { { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, { 0 }, }; +MODULE_DEVICE_TABLE(virtio, id_table); static unsigned int features[] = { VIRTIO_CONSOLE_F_SIZE, @@ -2170,6 +2171,7 @@ static struct virtio_device_id rproc_serial_id_table[] = { #endif { 0 }, }; 
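The virtio_console change here (the MODULE_DEVICE_TABLE() line added just above for id_table, and the one that follows immediately below for rproc_serial_id_table) appears to make sure every virtio ID table emits module aliases, so userspace can autoload the module for either device type. In general the pattern is simply:

#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>

static const struct virtio_device_id my_id_table[] = {
	{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
/* Generates the modalias entries udev/modprobe use for autoloading. */
MODULE_DEVICE_TABLE(virtio, my_id_table);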
+MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table); static unsigned int rproc_serial_features[] = { }; @@ -2322,6 +2324,5 @@ static void __exit fini(void) module_init(init); module_exit(fini); -MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio console driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 5f8082d89131307744cafa84c2167c0a59c4c979..6db4204e5d5d500c72a66a82689984601eb9ee6e 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -1483,13 +1483,13 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, return &clock->hw; } -static struct clk *bcm2835_register_gate(struct bcm2835_cprman *cprman, +static struct clk_hw *bcm2835_register_gate(struct bcm2835_cprman *cprman, const struct bcm2835_gate_data *data) { - return clk_register_gate(cprman->dev, data->name, data->parent, - CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, - cprman->regs + data->ctl_reg, - CM_GATE_BIT, 0, &cprman->regs_lock); + return clk_hw_register_gate(cprman->dev, data->name, data->parent, + CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, + cprman->regs + data->ctl_reg, + CM_GATE_BIT, 0, &cprman->regs_lock); } typedef struct clk_hw *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman, diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c index 2057809219f4eebcf3f43a68e75aef723c814f67..7426d910e079710b9cc7d41b683c305efbc7837e 100644 --- a/drivers/clk/qcom/gcc-msm8916.c +++ b/drivers/clk/qcom/gcc-msm8916.c @@ -270,7 +270,7 @@ static struct clk_pll gpll0 = { .l_reg = 0x21004, .m_reg = 0x21008, .n_reg = 0x2100c, - .config_reg = 0x21014, + .config_reg = 0x21010, .mode_reg = 0x21000, .status_reg = 0x2101c, .status_bit = 17, @@ -297,7 +297,7 @@ static struct clk_pll gpll1 = { .l_reg = 0x20004, .m_reg = 0x20008, .n_reg = 0x2000c, - .config_reg = 0x20014, + .config_reg = 0x20010, .mode_reg = 0x20000, .status_reg = 0x2001c, .status_bit = 17, @@ -324,7 +324,7 @@ static struct clk_pll gpll2 = { .l_reg = 0x4a004, .m_reg = 0x4a008, .n_reg = 0x4a00c, - .config_reg = 0x4a014, + .config_reg = 0x4a010, .mode_reg = 0x4a000, .status_reg = 0x4a01c, .status_bit = 17, @@ -351,7 +351,7 @@ static struct clk_pll bimc_pll = { .l_reg = 0x23004, .m_reg = 0x23008, .n_reg = 0x2300c, - .config_reg = 0x23014, + .config_reg = 0x23010, .mode_reg = 0x23000, .status_reg = 0x2301c, .status_bit = 17, diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c index f689661cfc3d2e6b5b46de03749e405e11bb6e9e..a84621c2ff6a4dae71573e9e5f9a35b8ce6e5b89 100644 --- a/drivers/clk/qcom/gdsc-regulator.c +++ b/drivers/clk/qcom/gdsc-regulator.c @@ -863,6 +863,8 @@ static int gdsc_probe(struct platform_device *pdev) sc->mbox = mbox_request_channel(&sc->mbox_client, 0); if (IS_ERR(sc->mbox)) { ret = PTR_ERR(sc->mbox); + if (ret == -EAGAIN) + ret = -EPROBE_DEFER; if (ret != -EPROBE_DEFER) dev_err(&pdev->dev, "mailbox channel request failed, ret=%d\n", ret); diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c index 7af48184b0224b1428ba1e9788f941511a47c9bb..04f4f3739e3bed0f0e90d77b77279a50837e03ee 100644 --- a/drivers/clk/rockchip/clk-rk3228.c +++ b/drivers/clk/rockchip/clk-rk3228.c @@ -163,8 +163,6 @@ PNAME(mux_i2s_out_p) = { "i2s1_pre", "xin12m" }; PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" }; PNAME(mux_sclk_spdif_p) = { "sclk_spdif_src", "spdif_frac", "xin12m" }; -PNAME(mux_aclk_gpu_pre_p) = { "cpll_gpu", "gpll_gpu", "hdmiphy_gpu", "usb480m_gpu" }; - 
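In the gdsc-regulator hunk a little further up, a transient -EAGAIN from mbox_request_channel() is remapped to -EPROBE_DEFER so the driver core simply retries probe once the mailbox controller is ready, and only real failures get logged. A rough sketch of that idiom, assuming (as the hunk suggests) that this downstream mailbox path can return -EAGAIN while the controller is still coming up; the foo_* names are placeholders:

#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/platform_device.h>

static struct mbox_client foo_mbox_client;

static int foo_probe(struct platform_device *pdev)
{
	struct mbox_chan *chan;
	int ret;

	foo_mbox_client.dev = &pdev->dev;
	chan = mbox_request_channel(&foo_mbox_client, 0);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		/* Controller not up yet: ask the core to retry probe later. */
		if (ret == -EAGAIN)
			ret = -EPROBE_DEFER;
		/* Deferral is expected, so do not log it as an error. */
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "mailbox request failed: %d\n", ret);
		return ret;
	}

	/* ... use chan; release with mbox_free_channel() on teardown ... */
	return 0;
}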
PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; @@ -475,16 +473,9 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { RK2928_CLKSEL_CON(24), 6, 10, DFLAGS, RK2928_CLKGATE_CON(2), 8, GFLAGS), - GATE(0, "cpll_gpu", "cpll", 0, - RK2928_CLKGATE_CON(3), 13, GFLAGS), - GATE(0, "gpll_gpu", "gpll", 0, - RK2928_CLKGATE_CON(3), 13, GFLAGS), - GATE(0, "hdmiphy_gpu", "hdmiphy", 0, - RK2928_CLKGATE_CON(3), 13, GFLAGS), - GATE(0, "usb480m_gpu", "usb480m", 0, + COMPOSITE(0, "aclk_gpu_pre", mux_pll_src_4plls_p, 0, + RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS, RK2928_CLKGATE_CON(3), 13, GFLAGS), - COMPOSITE_NOGATE(0, "aclk_gpu_pre", mux_aclk_gpu_pre_p, 0, - RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS), COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_2plls_p, 0, RK2928_CLKSEL_CON(25), 8, 1, MFLAGS, 0, 7, DFLAGS, @@ -589,8 +580,8 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS), /* PD_GPU */ - GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 14, GFLAGS), - GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 15, GFLAGS), + GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS), + GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS), /* PD_BUS */ GATE(0, "sclk_initmem_mbist", "aclk_cpu", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS), diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c index 1d2265f9ee97436cda8a023a57bb3b6aa898c521..1c327d5de98cbc4c0f0a3dde25a6c4fb6ebb2843 100644 --- a/drivers/clk/samsung/clk-exynos5433.c +++ b/drivers/clk/samsung/clk-exynos5433.c @@ -1674,7 +1674,8 @@ static const struct samsung_gate_clock peric_gate_clks[] __initconst = { GATE(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_pcm1_peric", ENABLE_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_I2S1, "sclk_i2s1", "sclk_i2s1_peric", - ENABLE_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0), + ENABLE_SCLK_PERIC, 6, + CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_SPI2, "sclk_spi2", "sclk_spi2_peric", ENABLE_SCLK_PERIC, 5, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_SPI1, "sclk_spi1", "sclk_spi1_peric", ENABLE_SCLK_PERIC, diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c index 918ba3164da94c9dcf02927b17940f7e76165551..cd856210db58c1d65b1d8ddfa802695fc7db635a 100644 --- a/drivers/clk/st/clk-flexgen.c +++ b/drivers/clk/st/clk-flexgen.c @@ -373,6 +373,7 @@ static void __init st_of_flexgen_setup(struct device_node *np) break; } + flex_flags &= ~CLK_IS_CRITICAL; of_clk_detect_critical(np, i, &flex_flags); /* diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index aa4add580516d471e5418d39be61f2b1706d84bf..0b5e091742f97b41e13327557b901e35ee4a31f3 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -98,7 +98,7 @@ static void sun6i_a31_get_pll1_factors(struct factors_request *req) * Round down the frequency to the closest multiple of either * 6 or 16 */ - u32 round_freq_6 = round_down(freq_mhz, 6); + u32 round_freq_6 = rounddown(freq_mhz, 6); u32 round_freq_16 = round_down(freq_mhz, 16); if (round_freq_6 > round_freq_16) diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c index beea89463ca2cfd996693a27926d9ea324c40eb3..4ea5c08a1eb668c4142a3e33d1da74eebc754d3f 
100644 --- a/drivers/clk/ti/composite.c +++ b/drivers/clk/ti/composite.c @@ -240,6 +240,7 @@ static void __init _register_composite(struct clk_hw *hw, if (!cclk->comp_clks[i]) continue; list_del(&cclk->comp_clks[i]->link); + kfree(cclk->comp_clks[i]->parent_names); kfree(cclk->comp_clks[i]); } diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c index 1f5f734e4919da6e18b5e34e50b60653dd8dd4a1..a018199575e3e2eda95ae61455fd2572a45af0c5 100644 --- a/drivers/clocksource/dw_apb_timer.c +++ b/drivers/clocksource/dw_apb_timer.c @@ -225,7 +225,8 @@ static int apbt_next_event(unsigned long delta, /** * dw_apb_clockevent_init() - use an APB timer as a clock_event_device * - * @cpu: The CPU the events will be targeted at. + * @cpu: The CPU the events will be targeted at or -1 if CPU affiliation + * isn't required. * @name: The name used for the timer and the IRQ for it. * @rating: The rating to give the timer. * @base: I/O base for the timer registers. @@ -260,7 +261,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, dw_ced->ced.max_delta_ticks = 0x7fffffff; dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced); dw_ced->ced.min_delta_ticks = 5000; - dw_ced->ced.cpumask = cpumask_of(cpu); + dw_ced->ced.cpumask = cpu < 0 ? cpu_possible_mask : cpumask_of(cpu); dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ; dw_ced->ced.set_state_shutdown = apbt_shutdown; diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c index 69866cd8f4bb419cae13a71b5569eedd1d32546a..3e4d0e5733d385f0a0621135919eb75734fe46b4 100644 --- a/drivers/clocksource/dw_apb_timer_of.c +++ b/drivers/clocksource/dw_apb_timer_of.c @@ -146,10 +146,6 @@ static int num_called; static int __init dw_apb_timer_init(struct device_node *timer) { switch (num_called) { - case 0: - pr_debug("%s: found clockevent timer\n", __func__); - add_clockevent(timer); - break; case 1: pr_debug("%s: found clocksource timer\n", __func__); add_clocksource(timer); @@ -160,6 +156,8 @@ static int __init dw_apb_timer_init(struct device_node *timer) #endif break; default: + pr_debug("%s: found clockevent timer\n", __func__); + add_clockevent(timer); break; } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 7a5662425b29108853325ed1c93fda2e713dc4d5..1aa0b05c8cbdfd3d9a7e7fb3fadea187e3ffbc84 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -935,7 +935,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, update_turbo_state(); if (global.turbo_disabled) { - pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); + pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); mutex_unlock(&intel_pstate_limits_lock); mutex_unlock(&intel_pstate_driver_lock); return -EPERM; diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index ae948b1da93a379b12d16aaf98be5df6ac762da4..909bd2255978be54465ed44992bfcb48a1d0ea6b 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -414,7 +414,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device) ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &kdev->kobj, "state%d", i); if (ret) { - kfree(kobj); + kobject_put(&kobj->kobj); goto error_state; } kobject_uevent(&kobj->kobj, KOBJ_ADD); @@ -544,7 +544,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle, 
&kdev->kobj, "driver"); if (ret) { - kfree(kdrv); + kobject_put(&kdrv->kobj); return ret; } @@ -638,7 +638,7 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj, "cpuidle"); if (error) { - kfree(kdev); + kobject_put(&kdev->kobj); return error; } diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 26e1103e49a6a0b51a43ee5bdb5deadea90bf10a..b87c6654a837718975cdccd028505e9f589e5c56 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -771,4 +771,14 @@ config CRYPTO_DEV_ARTPEC6 To compile this driver as a module, choose M here. +config CRYPTO_DEV_QCOM_ICE + tristate "Inline Crypto Module" + default n + depends on BLK_DEV_DM + help + This driver supports Inline Crypto Engine for QTI chipsets, MSM8994 + and later, to accelerate crypto operations for storage needs. + To compile this driver as a module, choose M here: the + module will be called ice. + endif # CRYPTO_HW diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index fee7cb2ce747c77e97b7a8f4a62eeb452b263147..a81f3c7e941d9c44d73c48312a2bc3af0cd7d436 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -183,7 +183,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev) struct nitrox_device *nitrox_get_first_device(void) { - struct nitrox_device *ndev = NULL; + struct nitrox_device *ndev; mutex_lock(&devlist_lock); list_for_each_entry(ndev, &ndevlist, list) { @@ -191,7 +191,7 @@ struct nitrox_device *nitrox_get_first_device(void) break; } mutex_unlock(&devlist_lock); - if (!ndev) + if (&ndev->list == &ndevlist) return NULL; refcount_inc(&ndev->refcnt); diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 6d626606b9c51d2edb568ec82c8d37401c30f2a4..898dcf3200c329f0e159af6394d66b2e4d13cac4 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -8,10 +8,9 @@ config CRYPTO_DEV_CCP_DD config CRYPTO_DEV_SP_CCP bool "Cryptographic Coprocessor device" default y - depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_CCP_DD && DMADEVICES select HW_RANDOM select DMA_ENGINE - select DMADEVICES select CRYPTO_SHA1 select CRYPTO_SHA256 help diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 8d39f3a07bf8605cbe65fcdeea2bd5379bbbd4ba..99c3827855c7d92e4e38de417919e2716f836961 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -2201,7 +2201,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; unsigned int c_id = chcrctx->dev->rx_channel_id; unsigned int ccm_xtra; - unsigned char tag_offset = 0, auth_offset = 0; + unsigned int tag_offset = 0, auth_offset = 0; unsigned int assoclen; if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c index 79301bbed9694d8127cf300a39a4c2fcc98bdf2a..c60d262728249d96b58509cc721b4386e6a2fc2d 100644 --- a/drivers/crypto/msm/ice.c +++ b/drivers/crypto/msm/ice.c @@ -25,7 +25,6 @@ #include #include #include "iceregs.h" -#include #include #include @@ -68,7 +67,6 @@ #define ICE_CRYPTO_CXT_FBE 2 static int ice_fde_flag; - struct ice_clk_info { struct list_head list; struct clk *clk; @@ -120,24 +118,13 @@ struct ice_device { wait_queue_head_t block_suspend_ice_queue; }; +static int qcom_ice_init(struct ice_device *ice_dev, void *host_controller_data, + 
ice_error_cb error_cb); + static int qti_ice_setting_config(struct request *req, - struct platform_device *pdev, struct ice_crypto_setting *crypto_data, struct ice_data_setting *setting, uint32_t cxt) { - struct ice_device *ice_dev = platform_get_drvdata(pdev); - - if (!ice_dev) { - pr_debug("%s no ICE device\n", __func__); - /* make the caller finish peacefully */ - return 0; - } - - if (ice_dev->is_ice_disable_fuse_blown) { - pr_err("%s ICE disabled fuse is blown\n", __func__); - return -EPERM; - } - if (!setting) return -EINVAL; @@ -297,23 +284,6 @@ static int qcom_ice_get_vreg(struct ice_device *ice_dev) return ret; } -static void qcom_ice_config_proc_ignore(struct ice_device *ice_dev) -{ - u32 regval; - - if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2 && - ICE_REV(ice_dev->ice_hw_version, MINOR) == 0 && - ICE_REV(ice_dev->ice_hw_version, STEP) == 0) { - regval = qcom_ice_readl(ice_dev, - QCOM_ICE_REGS_ADVANCED_CONTROL); - regval |= 0x800; - qcom_ice_writel(ice_dev, regval, - QCOM_ICE_REGS_ADVANCED_CONTROL); - /* Ensure register is updated */ - mb(); - } -} - static void qcom_ice_low_power_mode_enable(struct ice_device *ice_dev) { u32 regval; @@ -476,45 +446,6 @@ static int qcom_ice_enable(struct ice_device *ice_dev) return 0; } -static int qcom_ice_verify_ice(struct ice_device *ice_dev) -{ - unsigned int rev; - unsigned int maj_rev, min_rev, step_rev; - - rev = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_VERSION); - maj_rev = (rev & ICE_CORE_MAJOR_REV_MASK) >> ICE_CORE_MAJOR_REV; - min_rev = (rev & ICE_CORE_MINOR_REV_MASK) >> ICE_CORE_MINOR_REV; - step_rev = (rev & ICE_CORE_STEP_REV_MASK) >> ICE_CORE_STEP_REV; - - if (maj_rev > ICE_CORE_CURRENT_MAJOR_VERSION) { - pr_err("%s: Unknown QC ICE device at %lu, rev %d.%d.%d\n", - __func__, (unsigned long)ice_dev->mmio, - maj_rev, min_rev, step_rev); - return -ENODEV; - } - ice_dev->ice_hw_version = rev; - - dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%pK\n", - maj_rev, min_rev, step_rev, - ice_dev->mmio); - - return 0; -} - -static void qcom_ice_enable_intr(struct ice_device *ice_dev) -{ - unsigned int reg; - - reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK); - reg &= ~QCOM_ICE_NON_SEC_IRQ_MASK; - qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_NON_SEC_IRQ_MASK); - /* - * Ensure previous instructions was completed before issuing next - * ICE initialization/optimization instruction - */ - mb(); -} - static void qcom_ice_disable_intr(struct ice_device *ice_dev) { unsigned int reg; @@ -627,25 +558,13 @@ static int qcom_ice_parse_clock_info(struct platform_device *pdev, } static int qcom_ice_get_device_tree_data(struct platform_device *pdev, - struct ice_device *ice_dev) + struct ice_device *ice_dev) { struct device *dev = &pdev->dev; int rc = -1; int irq; - ice_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!ice_dev->res) { - pr_err("%s: No memory available for IORESOURCE\n", __func__); - return -ENOMEM; - } - - ice_dev->mmio = devm_ioremap_resource(dev, ice_dev->res); - if (IS_ERR(ice_dev->mmio)) { - rc = PTR_ERR(ice_dev->mmio); - pr_err("%s: Error = %d mapping ICE io memory\n", __func__, rc); - goto out; - } - + ice_dev->mmio = NULL; if (!of_parse_phandle(pdev->dev.of_node, "vdd-hba-supply", 0)) { pr_err("%s: No vdd-hba-supply regulator, assuming not needed\n", __func__); @@ -688,7 +607,7 @@ static int qcom_ice_get_device_tree_data(struct platform_device *pdev, err_dev: if (rc && ice_dev->mmio) devm_iounmap(dev, ice_dev->mmio); -out: +//out: return rc; } @@ -810,7 +729,12 @@ static int 
qcom_ice_probe(struct platform_device *pdev) * We would enable ICE when first request for crypto * operation arrives. */ - ice_dev->is_ice_enabled = false; + rc = qcom_ice_init(ice_dev, NULL, NULL); + if (rc) { + pr_err("create character device failed.\n"); + goto err_ice_dev; + } + ice_dev->is_ice_enabled = true; platform_set_drvdata(pdev, ice_dev); list_add_tail(&ice_dev->list, &ice_devices); @@ -999,31 +923,6 @@ static int qcom_ice_enable_clocks(struct ice_device *ice, bool enable) return ret; } -static int qcom_ice_secure_ice_init(struct ice_device *ice_dev) -{ - /* We need to enable source for ICE secure interrupts */ - int ret = 0; - u32 regval; - - regval = scm_io_read((unsigned long)ice_dev->res + - QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK); - - regval &= ~QCOM_ICE_SEC_IRQ_MASK; - ret = scm_io_write((unsigned long)ice_dev->res + - QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK, regval); - - /* - * Ensure previous instructions was completed before issuing next - * ICE initialization/optimization instruction - */ - mb(); - - if (!ret) - pr_err("%s: failed(0x%x) to init secure ICE config\n", - __func__, ret); - return ret; -} - static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev) { int ret = 0, scm_ret = 0; @@ -1064,7 +963,6 @@ static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev) static int qcom_ice_finish_init(struct ice_device *ice_dev) { - unsigned int reg; int err = 0; if (!ice_dev) { @@ -1090,53 +988,12 @@ static int qcom_ice_finish_init(struct ice_device *ice_dev) * configurations of host & ice. It is prudent to restore the config */ err = qcom_ice_update_sec_cfg(ice_dev); - if (err) - goto out; - - err = qcom_ice_verify_ice(ice_dev); - if (err) - goto out; - - /* if ICE_DISABLE_FUSE is blown, return immediately - * Currently, FORCE HW Keys are also disabled, since - * there is no use case for their usage neither in FDE - * nor in PFE - */ - reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_FUSE_SETTING); - reg &= (ICE_FUSE_SETTING_MASK | - ICE_FORCE_HW_KEY0_SETTING_MASK | - ICE_FORCE_HW_KEY1_SETTING_MASK); - - if (reg) { - ice_dev->is_ice_disable_fuse_blown = true; - pr_err("%s: Error: ICE_ERROR_HW_DISABLE_FUSE_BLOWN\n", - __func__); - err = -EPERM; - goto out; - } - /* TZ side of ICE driver would handle secure init of ICE HW from v2 */ - if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1 && - !qcom_ice_secure_ice_init(ice_dev)) { - pr_err("%s: Error: ICE_ERROR_ICE_TZ_INIT_FAILED\n", __func__); - err = -EFAULT; - goto out; - } - init_waitqueue_head(&ice_dev->block_suspend_ice_queue); - qcom_ice_low_power_mode_enable(ice_dev); - qcom_ice_optimization_enable(ice_dev); - qcom_ice_config_proc_ignore(ice_dev); - qcom_ice_enable_test_bus_config(ice_dev); - qcom_ice_enable(ice_dev); - ice_dev->is_ice_enabled = true; - qcom_ice_enable_intr(ice_dev); - atomic_set(&ice_dev->is_ice_suspended, 0); - atomic_set(&ice_dev->is_ice_busy, 0); out: return err; } -static int qcom_ice_init(struct platform_device *pdev, +static int qcom_ice_init(struct ice_device *ice_dev, void *host_controller_data, ice_error_cb error_cb) { @@ -1147,13 +1004,6 @@ static int qcom_ice_init(struct platform_device *pdev, * When any request for data transfer is received, it would enable * the ICE for that particular request */ - struct ice_device *ice_dev; - - ice_dev = platform_get_drvdata(pdev); - if (!ice_dev) { - pr_err("%s: invalid device\n", __func__); - return -EINVAL; - } ice_dev->error_cb = error_cb; ice_dev->host_controller_data = host_controller_data; @@ -1201,12 +1051,6 @@ static int 
qcom_ice_finish_power_collapse(struct ice_device *ice_dev) if (err) goto out; - /* - * for PFE case, clear the cached ICE key table, - * this will force keys to be reconfigured - * per each next transaction - */ - pfk_clear_on_reset(); } } @@ -1444,8 +1288,8 @@ static void qcom_ice_debug(struct platform_device *pdev) qcom_ice_dump_test_bus(ice_dev); pr_err("%s: ICE reset start time: %llu ICE reset done time: %llu\n", ice_dev->ice_instance_type, - (unsigned long long)ice_dev->ice_reset_start_time.tv64, - (unsigned long long)ice_dev->ice_reset_complete_time.tv64); + (unsigned long long)ice_dev->ice_reset_start_time, + (unsigned long long)ice_dev->ice_reset_complete_time); if (ktime_to_us(ktime_sub(ice_dev->ice_reset_complete_time, ice_dev->ice_reset_start_time)) > 0) @@ -1473,28 +1317,15 @@ static int qcom_ice_reset(struct platform_device *pdev) return qcom_ice_finish_power_collapse(ice_dev); } -static int qcom_ice_config_start(struct platform_device *pdev, - struct request *req, - struct ice_data_setting *setting, bool async) +int qcom_ice_config_start(struct request *req, struct ice_data_setting *setting) { - struct ice_crypto_setting pfk_crypto_data = {0}; struct ice_crypto_setting ice_data = {0}; - int ret = 0; - bool is_pfe = false; unsigned long sec_end = 0; sector_t data_size; - struct ice_device *ice_dev; - - if (!pdev || !req) { + if (!req) { pr_err("%s: Invalid params passed\n", __func__); return -EINVAL; } - ice_dev = platform_get_drvdata(pdev); - - if (!ice_dev) { - pr_err("%s: INVALID ice_dev\n", __func__); - return -EINVAL; - } /* * It is not an error to have a request with no bio @@ -1511,30 +1342,6 @@ static int qcom_ice_config_start(struct platform_device *pdev, return 0; } - if (atomic_read(&ice_dev->is_ice_suspended) == 1) - return -EINVAL; - - if (async) - atomic_set(&ice_dev->is_ice_busy, 1); - - ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async); - - if (async) { - atomic_set(&ice_dev->is_ice_busy, 0); - wake_up_interruptible(&ice_dev->block_suspend_ice_queue); - } - if (is_pfe) { - if (ret) { - if (ret != -EBUSY && ret != -EAGAIN) - pr_err("%s error %d while configuring ice key for PFE\n", - __func__, ret); - return ret; - } - - return qti_ice_setting_config(req, pdev, - &pfk_crypto_data, setting, ICE_CRYPTO_CXT_FBE); - } - if (ice_fde_flag && req->part && req->part->info && req->part->info->volname[0]) { if (!strcmp(req->part->info->volname, "userdata")) { @@ -1559,7 +1366,7 @@ static int qcom_ice_config_start(struct platform_device *pdev, if ((req->__sector + data_size) > sec_end) return 0; else - return qti_ice_setting_config(req, pdev, + return qti_ice_setting_config(req, &ice_data, setting, ICE_CRYPTO_CXT_FDE); } @@ -1575,34 +1382,6 @@ static int qcom_ice_config_start(struct platform_device *pdev, } EXPORT_SYMBOL(qcom_ice_config_start); -static int qcom_ice_config_end(struct request *req) -{ - int ret = 0; - bool is_pfe = false; - - if (!req) { - pr_err("%s: Invalid params passed\n", __func__); - return -EINVAL; - } - - if (!req->bio) { - /* It is not an error to have a request with no bio */ - return 0; - } - ret = pfk_load_key_end(req->bio, &is_pfe); - if (is_pfe) { - if (ret != 0) - pr_err("%s error %d while end configuring ice key for PFE\n", - __func__, ret); - return ret; - } - - - return 0; -} -EXPORT_SYMBOL(qcom_ice_config_end); - - static int qcom_ice_status(struct platform_device *pdev) { struct ice_device *ice_dev; @@ -1628,18 +1407,6 @@ static int qcom_ice_status(struct platform_device *pdev) } -struct qcom_ice_variant_ops 
qcom_ice_ops = { - .name = "qcom", - .init = qcom_ice_init, - .reset = qcom_ice_reset, - .resume = qcom_ice_resume, - .suspend = qcom_ice_suspend, - .config_start = qcom_ice_config_start, - .config_end = qcom_ice_config_end, - .status = qcom_ice_status, - .debug = qcom_ice_debug, -}; - struct platform_device *qcom_ice_get_pdevice(struct device_node *node) { struct platform_device *ice_pdev = NULL; @@ -1805,13 +1572,22 @@ int qcom_ice_setup_ice_hw(const char *storage_type, int enable) if (!ice_dev || (ice_dev->is_ice_enabled == false)) return ret; - if (enable) return enable_ice_setup(ice_dev); else return disable_ice_setup(ice_dev); } +static struct qcom_ice_variant_ops qcom_ice_ops = { + .name = "qcom", + .reset = qcom_ice_reset, + .resume = qcom_ice_resume, + .suspend = qcom_ice_suspend, + .config_start = qcom_ice_config_start, + .status = qcom_ice_status, + .debug = qcom_ice_debug, +}; + struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node) { return &qcom_ice_ops; diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c index 3d9b11ca2cf03a79d303921bb69d58bde30dc604..e73d5534d214b05deeef4a8e324b24877e764921 100644 --- a/drivers/crypto/msm/qce50.c +++ b/drivers/crypto/msm/qce50.c @@ -848,6 +848,11 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, switch (creq->alg) { case CIPHER_ALG_DES: if (creq->mode != QCE_MODE_ECB) { + if (ivsize > MAX_IV_LENGTH) { + pr_err("%s: error: Invalid length parameter\n", + __func__); + return -EINVAL; + } _byte_stream_to_net_words(enciv32, creq->iv, ivsize); pce = cmdlistinfo->encr_cntr_iv; pce->data = enciv32[0]; diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index c1f8da958c78b1c87dd083b729bd4832ab9f907b..4e38b87c3228467581cddb5739a6f93fcaa52957 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -168,8 +168,6 @@ struct omap_sham_hmac_ctx { }; struct omap_sham_ctx { - struct omap_sham_dev *dd; - unsigned long flags; /* fallback stuff */ @@ -916,27 +914,35 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) return 0; } +struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx) +{ + struct omap_sham_dev *dd; + + if (ctx->dd) + return ctx->dd; + + spin_lock_bh(&sham.lock); + dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list); + list_move_tail(&dd->list, &sham.dev_list); + ctx->dd = dd; + spin_unlock_bh(&sham.lock); + + return dd; +} + static int omap_sham_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - struct omap_sham_dev *dd = NULL, *tmp; + struct omap_sham_dev *dd; int bs = 0; - spin_lock_bh(&sham.lock); - if (!tctx->dd) { - list_for_each_entry(tmp, &sham.dev_list, list) { - dd = tmp; - break; - } - tctx->dd = dd; - } else { - dd = tctx->dd; - } - spin_unlock_bh(&sham.lock); + ctx->dd = NULL; - ctx->dd = dd; + dd = omap_sham_find_dev(ctx); + if (!dd) + return -ENODEV; ctx->flags = 0; @@ -1186,8 +1192,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); - struct omap_sham_dev *dd = tctx->dd; + struct omap_sham_dev *dd = ctx->dd; ctx->op = op; @@ -1197,7 +1202,7 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) static int 
omap_sham_update(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - struct omap_sham_dev *dd = ctx->dd; + struct omap_sham_dev *dd = omap_sham_find_dev(ctx); if (!req->nbytes) return 0; @@ -1302,21 +1307,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, struct omap_sham_hmac_ctx *bctx = tctx->base; int bs = crypto_shash_blocksize(bctx->shash); int ds = crypto_shash_digestsize(bctx->shash); - struct omap_sham_dev *dd = NULL, *tmp; int err, i; - spin_lock_bh(&sham.lock); - if (!tctx->dd) { - list_for_each_entry(tmp, &sham.dev_list, list) { - dd = tmp; - break; - } - tctx->dd = dd; - } else { - dd = tctx->dd; - } - spin_unlock_bh(&sham.lock); - err = crypto_shash_setkey(tctx->fallback, key, keylen); if (err) return err; @@ -1334,7 +1326,7 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, memset(bctx->ipad + keylen, 0, bs - keylen); - if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { + if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) { memcpy(bctx->opad, bctx->ipad, bs); for (i = 0; i < bs; i++) { @@ -2073,6 +2065,7 @@ static int omap_sham_probe(struct platform_device *pdev) } dd->flags |= dd->pdata->flags; + sham.flags |= dd->pdata->flags; pm_runtime_use_autosuspend(dev); pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); @@ -2098,6 +2091,9 @@ static int omap_sham_probe(struct platform_device *pdev) spin_unlock(&sham.lock); for (i = 0; i < dd->pdata->algs_info_size; i++) { + if (dd->pdata->algs_info[i].registered) + break; + for (j = 0; j < dd->pdata->algs_info[i].size; j++) { struct ahash_alg *alg; @@ -2143,9 +2139,11 @@ static int omap_sham_remove(struct platform_device *pdev) list_del(&dd->list); spin_unlock(&sham.lock); for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) - for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) { crypto_unregister_ahash( &dd->pdata->algs_info[i].algs_list[j]); + dd->pdata->algs_info[i].registered--; + } tasklet_kill(&dd->done_task); pm_runtime_disable(&pdev->dev); diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index fef2b306cdee04652f634f282e396f36bfa962c8..6c8a03a1132f66b1a636e82933f759df4bd58b16 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -2636,7 +2636,6 @@ static struct talitos_alg_template driver_algs[] = { .cra_ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, } }, .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | @@ -2670,6 +2669,7 @@ static struct talitos_alg_template driver_algs[] = { .cra_ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, .setkey = ablkcipher_aes_setkey, } }, diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index e2231a1a05a12a012cde1fb2a24ee97fededcff1..e6b889ce395ea2efc051ff7e9578b43e29b4944a 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -354,13 +354,18 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, int err; unsigned long flags; struct scatterlist outhdr, iv_sg, status_sg, **sgs; - int i; u64 dst_len; unsigned int num_out = 0, num_in = 0; int sg_total; uint8_t *iv; + struct scatterlist *sg; src_nents = sg_nents_for_len(req->src, req->nbytes); + if (src_nents < 0) { + pr_err("Invalid number of src SG.\n"); + return src_nents; + } + dst_nents = 
sg_nents(req->dst); pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n", @@ -406,6 +411,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, goto free; } + dst_len = min_t(unsigned int, req->nbytes, dst_len); pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n", req->nbytes, dst_len); @@ -441,12 +447,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, vc_sym_req->iv = iv; /* Source data */ - for (i = 0; i < src_nents; i++) - sgs[num_out++] = &req->src[i]; + for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--) + sgs[num_out++] = sg; /* Destination data */ - for (i = 0; i < dst_nents; i++) - sgs[num_out + num_in++] = &req->dst[i]; + for (sg = req->dst; sg; sg = sg_next(sg)) + sgs[num_out + num_in++] = sg; /* Status */ sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status)); @@ -569,10 +575,11 @@ static void virtio_crypto_ablkcipher_finalize_req( struct ablkcipher_request *req, int err) { - crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine, - req, err); kzfree(vc_sym_req->iv); virtcrypto_clear_request(&vc_sym_req->base); + + crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine, + req, err); } static struct crypto_alg virtio_crypto_algs[] = { { diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index c7568869284e17d4b63379b236a0f30391640820..0d2c6e13a01f8f01a6bc92b8381973206f3dac0a 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -682,6 +682,13 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id) fsl_chan = &fsl_edma->chans[ch]; spin_lock(&fsl_chan->vchan.lock); + + if (!fsl_chan->edesc) { + /* terminate_all called before */ + spin_unlock(&fsl_chan->vchan.lock); + continue; + } + if (!fsl_chan->edesc->iscyclic) { list_del(&fsl_chan->edesc->vdesc.node); vchan_cookie_complete(&fsl_chan->edesc->vdesc); diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index e3899ae429e0ff18960dd4e37b73462454fd6fbe..4c2b41beaf638203ce0ea3069fa659fa1f4349ce 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -38,6 +38,18 @@ #include "../dmaengine.h" +int completion_timeout = 200; +module_param(completion_timeout, int, 0644); +MODULE_PARM_DESC(completion_timeout, + "set ioat completion timeout [msec] (default 200 [msec])"); +int idle_timeout = 2000; +module_param(idle_timeout, int, 0644); +MODULE_PARM_DESC(idle_timeout, + "set ioat idel timeout [msec] (default 2000 [msec])"); + +#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout) +#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout) + static char *chanerr_str[] = { "DMA Transfer Source Address Error", "DMA Transfer Destination Address Error", diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 56200eefcf5eed792137a0ea2b9b4d5c295484ea..01f929957230358a45c6874b24790b34ee320985 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -111,8 +111,6 @@ struct ioatdma_chan { #define IOAT_RUN 5 #define IOAT_CHAN_ACTIVE 6 struct timer_list timer; - #define COMPLETION_TIMEOUT msecs_to_jiffies(100) - #define IDLE_TIMEOUT msecs_to_jiffies(2000) #define RESET_DELAY msecs_to_jiffies(100) struct ioatdma_device *ioat_dma; dma_addr_t completion_dma; diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 13c68b6434ce276dbf2b5780b24c32d5824badde..15b4a44e6006972c0125dd1cd05c88371ffcb19b 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -362,6 +362,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac) 
gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, size); tdmac->desc_arr = NULL; + if (tdmac->status == DMA_ERROR) + tdmac->status = DMA_COMPLETE; return; } diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index f9028e9d0dfc269cadd69b843aca254e8d46acad..d6af2d439b979467255eca0c213eed1fa3925284 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -873,6 +873,7 @@ static int pch_dma_probe(struct pci_dev *pdev, } pci_set_master(pdev); + pd->dma.dev = &pdev->dev; err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd); if (err) { @@ -888,7 +889,6 @@ static int pch_dma_probe(struct pci_dev *pdev, goto err_free_irq; } - pd->dma.dev = &pdev->dev; INIT_LIST_HEAD(&pd->dma.channels); diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 4f4733d831a1ac9fc3043c4d8b8d6b5b2fd0ac8e..86b45198fb962c919277f535dab220de320961ce 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -583,6 +583,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc) ret = pm_runtime_get_sync(tdc2dev(tdc)); if (ret < 0) { + pm_runtime_put_noidle(tdc2dev(tdc)); free_irq(tdc->irq, tdc); return ret; } @@ -764,8 +765,10 @@ static int tegra_adma_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); goto rpm_disable; + } ret = tegra_adma_init(tdma); if (ret) @@ -793,7 +796,7 @@ static int tegra_adma_probe(struct platform_device *pdev) ret = dma_async_device_register(&tdma->dma_dev); if (ret < 0) { dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret); - goto irq_dispose; + goto rpm_put; } ret = of_dma_controller_register(pdev->dev.of_node, diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index b36abd2537863f15aa100ea2cad9627327b144b2..21c5f95596be835f8f55fe38689adbfa4abe4bdf 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -261,6 +261,8 @@ static int get_scrub_rate(struct mem_ctl_info *mci) if (pvt->model == 0x60) amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); + else + amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); break; case 0x17: diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c index 6f6537ab0a7911e5dbfaaee5e2f2794326588838..59e6ca685be856f5991f21dfa24805c45f3ea6bb 100644 --- a/drivers/extcon/extcon-adc-jack.c +++ b/drivers/extcon/extcon-adc-jack.c @@ -128,7 +128,7 @@ static int adc_jack_probe(struct platform_device *pdev) for (i = 0; data->adc_conditions[i].id != EXTCON_NONE; i++); data->num_conditions = i; - data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel); + data->chan = devm_iio_channel_get(&pdev->dev, pdata->consumer_channel); if (IS_ERR(data->chan)) return PTR_ERR(data->chan); @@ -170,7 +170,6 @@ static int adc_jack_remove(struct platform_device *pdev) free_irq(data->irq, data); cancel_work_sync(&data->handler.work); - iio_channel_release(data->chan); return 0; } diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 86210f75d23331899f6454eb6b1c20de0e0ae992..a4fadd42221d36223a2bc676342daf8705e7c3ad 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -164,6 +164,17 @@ config RESET_ATTACK_MITIGATION have been evicted, since otherwise it will trigger even on clean reboots. 
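The extcon-adc-jack change above moves to devm_iio_channel_get(), tying the IIO channel's lifetime to the device so the explicit iio_channel_release() in the remove path can be dropped. A small sketch of the device-managed pattern (driver and channel names are made up):

#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct iio_channel *chan;

	/*
	 * The core releases the channel automatically on probe failure,
	 * driver unbind or device removal; no matching release call needed.
	 */
	chan = devm_iio_channel_get(&pdev->dev, "adc");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... read it with iio_read_channel_processed() and friends ... */
	return 0;
}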
+config EFI_CUSTOM_SSDT_OVERLAYS + bool "Load custom ACPI SSDT overlay from an EFI variable" + depends on EFI_VARS && ACPI + default ACPI_TABLE_UPGRADE + help + Allow loading of an ACPI SSDT overlay from an EFI variable specified + by a kernel command line option. + + See Documentation/admin-guide/acpi/ssdt-overlays.rst for more + information.endmenu + endmenu config UEFI_CPER diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index b39b7e6d4e4dc1a6bcbde746472714b82f5ad521..a3dc6cb7326a5b8e07db9284ca086e867dc4dfc1 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -221,7 +221,7 @@ static void generic_ops_unregister(void) efivars_unregister(&generic_efivars); } -#if IS_ENABLED(CONFIG_ACPI) +#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS #define EFIVAR_SSDT_NAME_MAX 16 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata; static int __init efivar_ssdt_setup(char *str) diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 1c65f5ac43686cea83939ad3672ca29ba00278f1..6529addd1e82beaa217a9a417eb3cfabdd05405c 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -586,8 +586,10 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype, NULL, "%s", short_name); kfree(short_name); - if (ret) + if (ret) { + kobject_put(&new_var->kobj); return ret; + } kobject_uevent(&new_var->kobj, KOBJ_ADD); if (efivar_entry_add(new_var, &efivar_sysfs_list)) { diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index f3c28777b8c6fed9bb351ad1d93307b3651a8adb..deb1d8f3bdc8cf9b2f6c5478734200d9fb4ecc44 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -180,7 +180,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num) rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL, "entry%d", entry_num); if (rc) { - kfree(entry); + kobject_put(&entry->kobj); return rc; } } diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c index d4e6ba0301bc317b3bf3d6c9fbd767efb93b32fd..694674dfbf82a84b806cda8f3fd6c0126778c708 100644 --- a/drivers/gpio/gpio-arizona.c +++ b/drivers/gpio/gpio-arizona.c @@ -69,6 +69,7 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset) ret = pm_runtime_get_sync(chip->parent); if (ret < 0) { dev_err(chip->parent, "Failed to resume: %d\n", ret); + pm_runtime_put_autosuspend(chip->parent); return ret; } @@ -77,12 +78,15 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset) if (ret < 0) { dev_err(chip->parent, "Failed to drop cache: %d\n", ret); + pm_runtime_put_autosuspend(chip->parent); return ret; } ret = regmap_read(arizona->regmap, reg, &val); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(chip->parent); return ret; + } pm_runtime_mark_last_busy(chip->parent); pm_runtime_put_autosuspend(chip->parent); @@ -111,6 +115,7 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip, ret = pm_runtime_get_sync(chip->parent); if (ret < 0) { dev_err(chip->parent, "Failed to resume: %d\n", ret); + pm_runtime_put(chip->parent); return ret; } } diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c index a09d2f9ebacc8d4909d79119333e344453ea6e0a..695c19901eff0ed27b9e49372d8c5dc475ab360c 100644 --- a/drivers/gpio/gpio-exar.c +++ b/drivers/gpio/gpio-exar.c @@ -148,8 +148,10 @@ static int gpio_exar_probe(struct platform_device *pdev) mutex_init(&exar_gpio->lock); index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); - if (index < 0) 
- goto err_destroy; + if (index < 0) { + ret = index; + goto err_mutex_destroy; + } sprintf(exar_gpio->name, "exar_gpio%d", index); exar_gpio->gpio_chip.label = exar_gpio->name; @@ -176,6 +178,7 @@ static int gpio_exar_probe(struct platform_device *pdev) err_destroy: ida_simple_remove(&ida_index, index); +err_mutex_destroy: mutex_destroy(&exar_gpio->lock); return ret; } diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 1eb857e2f62f166339bce3ff3c673b7c256845f4..dd801f5d525309fa9e2508fceda521335b872ad6 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -356,6 +356,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d) struct tegra_gpio_info *tgi = bank->tgi; unsigned int gpio = d->hwirq; + tegra_gpio_irq_mask(d); gpiochip_unlock_as_irq(&tgi->gc, gpio); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 4894d8a87c0493e1a955c2dc90b3f43ee479e944..ae23f7e0290c3ef7e801c6e6d9d242347d418b6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -728,7 +728,6 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_kms_helper_poll_disable(drm_dev); - vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); ret = amdgpu_device_suspend(drm_dev, false, false); pci_save_state(pdev); @@ -765,7 +764,6 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) ret = amdgpu_device_resume(drm_dev, false, false); drm_kms_helper_poll_enable(drm_dev); - vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; return 0; } diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c index 67469c26bae88f9ada64e0866d73fbdcfe793886..45a027d7a1e4986c4d4a7b0e897f533a6020ccaa 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c @@ -20,13 +20,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs, { switch (fs) { case 32000: - *n = 4096; + case 48000: + case 96000: + case 192000: + *n = fs * 128 / 1000; break; case 44100: - *n = 6272; - break; - case 48000: - *n = 6144; + case 88200: + case 176400: + *n = fs * 128 / 900; break; } diff --git a/drivers/gpu/drm/bridge/analogix-anx7625.c b/drivers/gpu/drm/bridge/analogix-anx7625.c index 8a45ad58d3f936d98624ac1aaade4affee1a69b0..ce89f31ff13ab60b0b035969618bbb8c04b53d1a 100644 --- a/drivers/gpu/drm/bridge/analogix-anx7625.c +++ b/drivers/gpu/drm/bridge/analogix-anx7625.c @@ -1289,7 +1289,7 @@ static void anx7625_bridge_enable(struct drm_bridge *bridge) #ifdef CONFIG_PM_SLEEP if (anx7625->out_of_hibr) { anx7625->out_of_hibr = false; - place_marker("Hiber: Display up"); + update_marker("Hiber: Display up"); } #endif if (!anx7625->powered) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index fe59b05bbe7ba36ebfb564759fdb4088d5241e5f..4fdb0fef64f6282238d72872b10e2e67191a2963 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -2886,6 +2887,17 @@ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, return ret; } +static int do_get_act_status(struct drm_dp_aux *aux) +{ + int ret; + u8 status; + + ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); + if (ret < 0) + return ret; + + 
return status; +} /** * drm_dp_check_act_status() - Check ACT handled status. @@ -2895,33 +2907,29 @@ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, */ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) { - u8 status; - int ret; - int count = 0; - - do { - ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); - - if (ret < 0) { - DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); - goto fail; - } - - if (status & DP_PAYLOAD_ACT_HANDLED) - break; - count++; - udelay(100); - - } while (count < 30); - - if (!(status & DP_PAYLOAD_ACT_HANDLED)) { - DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); - ret = -EINVAL; - goto fail; + /* + * There doesn't seem to be any recommended retry count or timeout in + * the MST specification. Since some hubs have been observed to take + * over 1 second to update their payload allocations under certain + * conditions, we use a rather large timeout value. + */ + const int timeout_ms = 3000; + int ret, status; + + ret = readx_poll_timeout(do_get_act_status, mgr->aux, status, + status & DP_PAYLOAD_ACT_HANDLED || status < 0, + 200, timeout_ms * USEC_PER_MSEC); + if (ret < 0 && status >= 0) { + DRM_DEBUG_KMS("Failed to get ACT after %dms, last status: %02x\n", + timeout_ms, status); + return -EINVAL; + } else if (status < 0) { + DRM_DEBUG_KMS("Failed to read payload table status: %d\n", + status); + return status; } + return 0; -fail: - return ret; } EXPORT_SYMBOL(drm_dp_check_act_status); diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c index cf804389f5ecad3ea7589fc0d074ec6c12578ca6..d50a7884e69e187b5d625b9a601dd3b45d9bc021 100644 --- a/drivers/gpu/drm/drm_encoder_slave.c +++ b/drivers/gpu/drm/drm_encoder_slave.c @@ -84,7 +84,7 @@ int drm_i2c_encoder_init(struct drm_device *dev, err = encoder_drv->encoder_init(client, dev, encoder); if (err) - goto fail_unregister; + goto fail_module_put; if (info->platform_data) encoder->slave_funcs->set_config(&encoder->base, @@ -92,9 +92,10 @@ int drm_i2c_encoder_init(struct drm_device *dev, return 0; +fail_module_put: + module_put(module); fail_unregister: i2c_unregister_device(client); - module_put(module); fail: return err; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index ba4a32b132baada502df485477d85281600b872f..59ce068b152f5fc252f16f0134e169239c9a3ead 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -267,8 +267,10 @@ static void mic_pre_enable(struct drm_bridge *bridge) goto unlock; ret = pm_runtime_get_sync(mic->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(mic->dev); goto unlock; + } mic_set_path(mic, 1); diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index e4b9eb1f6b6021b0563341aff68ae4eab3c75f98..f6b81f3256cf0fd08ef241f09a984f671067ea8b 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -570,6 +570,9 @@ struct drm_i915_reg_descriptor { #define REG32(_reg, ...) \ { .addr = (_reg), __VA_ARGS__ } +#define REG32_IDX(_reg, idx) \ + { .addr = _reg(idx) } + /* * Convenience macro for adding 64-bit registers. 
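/*
 * The drm_dp_check_act_status() rewrite above leans on the generic
 * polling helper from <linux/iopoll.h>. Minimal sketch of the calling
 * convention, not part of the patch (my_read_status(), my_wait_ready()
 * and MY_READY are hypothetical; the real code polls do_get_act_status()
 * for DP_PAYLOAD_ACT_HANDLED):
 */
#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_READY	BIT(0)		/* hypothetical ready flag */

static int my_read_status(u8 __iomem *reg)
{
	return readb(reg);	/* or any helper returning status or -errno */
}

static int my_wait_ready(u8 __iomem *reg)
{
	int status, ret;

	/* poll every 200us, time out after 3s, stop early on a read error */
	ret = readx_poll_timeout(my_read_status, reg, status,
				 (status & MY_READY) || status < 0,
				 200, 3000 * USEC_PER_MSEC);
	if (ret)
		return ret;		/* -ETIMEDOUT */

	return status < 0 ? status : 0;
}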
* @@ -667,6 +670,7 @@ static const struct drm_i915_reg_descriptor gen9_blt_regs[] = { REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), REG32(BCS_SWCTRL), REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), + REG32_IDX(RING_CTX_TIMESTAMP, BLT_RING_BASE), REG64_IDX(BCS_GPR, 0), REG64_IDX(BCS_GPR, 1), REG64_IDX(BCS_GPR, 2), diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d99d05a91032e6cf97462f0a6ced5493488a8307..bf13299ebb5584ae821b1cc0f920bdee13bbf59b 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1566,7 +1566,9 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) * happened we would make the mistake of assuming that the * relocations were valid. */ - user_access_begin(); + if (!user_access_begin(VERIFY_WRITE, urelocs, size)) + goto end_user; + for (copied = 0; copied < nreloc; copied++) unsafe_put_user(-1, &urelocs[copied].presumed_offset, @@ -2601,6 +2603,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, struct drm_i915_gem_execbuffer2 *args = data; struct drm_i915_gem_exec_object2 *exec2_list; struct drm_syncobj **fences = NULL; + const size_t count = args->buffer_count; int err; if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) { @@ -2649,7 +2652,17 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, unsigned int i; /* Copy the new buffer offsets back to the user's exec list. */ - user_access_begin(); + /* + * Note: count * sizeof(*user_exec_list) does not overflow, + * because we checked 'count' in check_buffer_count(). + * + * And this range already got effectively checked earlier + * when we did the "copy_from_user()" above. + */ + if (!user_access_begin(VERIFY_WRITE, user_exec_list, + count * sizeof(*user_exec_list))) + goto end_user; + for (i = 0; i < args->buffer_count; i++) { if (!(exec2_list[i].offset & UPDATE)) continue; diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c index 91ea89bd7ac30720ba1a70c554c5f430863080cc..c0e9ee84dc9f01c4a9f1bfbf904c03b09406d031 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c @@ -2632,8 +2632,7 @@ void dsi_ctrl_disable_status_interrupt(struct dsi_ctrl *dsi_ctrl, { unsigned long flags; - if (!dsi_ctrl || dsi_ctrl->irq_info.irq_num == -1 || - intr_idx >= DSI_STATUS_INTERRUPT_COUNT) + if (!dsi_ctrl || intr_idx >= DSI_STATUS_INTERRUPT_COUNT) return; spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags); @@ -2645,7 +2644,8 @@ void dsi_ctrl_disable_status_interrupt(struct dsi_ctrl *dsi_ctrl, dsi_ctrl->irq_info.irq_stat_mask); /* don't need irq if no lines are enabled */ - if (dsi_ctrl->irq_info.irq_stat_mask == 0) + if (dsi_ctrl->irq_info.irq_stat_mask == 0 && + dsi_ctrl->irq_info.irq_num != -1) disable_irq_nosync(dsi_ctrl->irq_info.irq_num); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index f25601b33ca92cc854f4d4ba72fe248a3934b063..7802008b07ae3e8e3a3c9c4aec711cda7666f2c8 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -972,7 +972,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) return 0; fail: - mdp5_destroy(pdev); + if (mdp5_kms) + mdp5_destroy(pdev); return ret; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index ec037cd5e2346447ad347670353e662389152ad7..35d108c991b16698c4cdba971a80c6884be79e39 100644 --- 
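/*
 * The execbuffer hunks above move to the checked form of
 * user_access_begin(): the user range is validated once up front, every
 * unsafe_put_user() sits between a successful user_access_begin() and
 * user_access_end(), and the third unsafe_put_user() argument is the
 * label jumped to on a fault. Hypothetical sketch (not part of the
 * patch) mirroring the three-argument user_access_begin() used in this
 * tree:
 */
static int clear_user_words(u32 __user *uptr, unsigned int count)
{
	unsigned int i;

	if (!user_access_begin(VERIFY_WRITE, uptr, count * sizeof(*uptr)))
		return -EFAULT;

	for (i = 0; i < count; i++)
		unsafe_put_user(0, &uptr[i], efault);

	user_access_end();
	return 0;

efault:
	user_access_end();
	return -EFAULT;
}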
a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1380,24 +1380,27 @@ static int msm_ioctl_register_event(struct drm_device *dev, void *data, * calls add to client list and return. */ count = msm_event_client_count(dev, req_event, false); - /* Add current client to list */ - spin_lock_irqsave(&dev->event_lock, flag); - list_add_tail(&client->base.link, &priv->client_event_list); - spin_unlock_irqrestore(&dev->event_lock, flag); - - if (count) + if (count) { + /* Add current client to list */ + spin_lock_irqsave(&dev->event_lock, flag); + list_add_tail(&client->base.link, &priv->client_event_list); + spin_unlock_irqrestore(&dev->event_lock, flag); return 0; + } ret = msm_register_event(dev, req_event, file, true); if (ret) { DRM_ERROR("failed to enable event %x object %x object id %d\n", req_event->event, req_event->object_type, req_event->object_id); + kfree(client); + } else { + /* Add current client to list */ spin_lock_irqsave(&dev->event_lock, flag); - list_del(&client->base.link); + list_add_tail(&client->base.link, &priv->client_event_list); spin_unlock_irqrestore(&dev->event_lock, flag); - kfree(client); } + return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 70a8d0b0c4f10826c14d24c51e6e82435de752f5..d00524a5d7f08b11b39cd7a9df9bea60cfc38173 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -754,7 +754,6 @@ nouveau_pmops_runtime_suspend(struct device *dev) } drm_kms_helper_poll_disable(drm_dev); - vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); nouveau_switcheroo_optimus_dsm(); ret = nouveau_do_suspend(drm_dev, true); pci_save_state(pdev); @@ -789,7 +788,6 @@ nouveau_pmops_runtime_resume(struct device *dev) /* do magic */ nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); - vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; /* Monitors may have been connected / disconnected during suspend */ diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c index c8ab1b5741a3e3c0e93a9943c0b3811b8c646ea6..db7769cb33ebadfa10078c05f019c35cd4680338 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c @@ -118,10 +118,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, if (retries) udelay(400); - /* transaction request, wait up to 1ms for it to complete */ + /* transaction request, wait up to 2ms for it to complete */ nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl); - timeout = 1000; + timeout = 2000; do { ctrl = nvkm_rd32(device, 0x00e4e4 + base); udelay(1); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c index 7ef60895f43a7808229a8d72e27b2b54d67f3d48..edb6148cbca042c544939adb2cae2588e90a3e0e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c @@ -118,10 +118,10 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, if (retries) udelay(400); - /* transaction request, wait up to 1ms for it to complete */ + /* transaction request, wait up to 2ms for it to complete */ nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl); - timeout = 1000; + timeout = 2000; do { ctrl = nvkm_rd32(device, 0x00d954 + base); udelay(1); diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c index 
7fbcc35e8ad35bfa8cce9f3323a4f4e5d574f21e..c89c10055641e55d4228642396e3a475d1331d2b 100644 --- a/drivers/gpu/drm/qxl/qxl_image.c +++ b/drivers/gpu/drm/qxl/qxl_image.c @@ -210,7 +210,8 @@ qxl_image_init_helper(struct qxl_device *qdev, break; default: DRM_ERROR("unsupported image bit depth\n"); - return -EINVAL; /* TODO: cleanup */ + qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); + return -EINVAL; } image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN; image->u.bitmap.x = width; diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index c5716a0ca3b8b2c4afd3b1f60948127bedc90e0a..20ca0a75e685ef74bbd660a3932acba6ac3c27f7 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -181,7 +181,7 @@ int qxl_device_init(struct qxl_device *qdev, &(qdev->ram_header->cursor_ring_hdr), sizeof(struct qxl_command), QXL_CURSOR_RING_SIZE, - qdev->io_base + QXL_IO_NOTIFY_CMD, + qdev->io_base + QXL_IO_NOTIFY_CURSOR, false, &qdev->cursor_event); diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index c97fbb2ab48b45dc07e1f359caaf1a1df420794d..6e607cc7b6e5a0bec452c16f6ce6daea383c7682 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -5551,6 +5551,7 @@ static int ci_parse_power_table(struct radeon_device *rdev) if (!rdev->pm.dpm.ps) return -ENOMEM; power_state_offset = (u8 *)state_array->states; + rdev->pm.dpm.num_ps = 0; for (i = 0; i < state_array->ucNumEntries; i++) { u8 *idx; power_state = (union pplib_power_state *)power_state_offset; @@ -5560,10 +5561,8 @@ static int ci_parse_power_table(struct radeon_device *rdev) if (!rdev->pm.power_state[i].clock_info) return -EINVAL; ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL); - if (ps == NULL) { - kfree(rdev->pm.dpm.ps); + if (ps == NULL) return -ENOMEM; - } rdev->pm.dpm.ps[i].ps_priv = ps; ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], non_clock_info, @@ -5585,8 +5584,8 @@ static int ci_parse_power_table(struct radeon_device *rdev) k++; } power_state_offset += 2 + power_state->v2.ucNumDPMLevels; + rdev->pm.dpm.num_ps = i + 1; } - rdev->pm.dpm.num_ps = state_array->ucNumEntries; /* fill in the vce power states */ for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) { diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 9416e72f86aafcc2bcb8cb126e05641f9fe52f39..d491b3aa124f3058039eb72b5fe75523bf797df7 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -2126,7 +2126,7 @@ static int ni_init_smc_spll_table(struct radeon_device *rdev) if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) ret = -EINVAL; - if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) + if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT)) ret = -EINVAL; if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT)) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index f4becad0a78c0b0b13bdf14cb8a0d077ef805841..f6908e2f9e55a1839e5c24db77c805878eea8057 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -424,7 +424,6 @@ static int radeon_pmops_runtime_suspend(struct device *dev) drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_kms_helper_poll_disable(drm_dev); - vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); ret = radeon_suspend_kms(drm_dev, false, false, false); 
pci_save_state(pdev); @@ -461,7 +460,6 @@ static int radeon_pmops_runtime_resume(struct device *dev) ret = radeon_resume_kms(drm_dev, false, false); drm_kms_helper_poll_enable(drm_dev); - vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; return 0; } diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h index a1f8cba251a245af7227d853da784518d19d405d..3d9148eb40a7e156024112c7873251586a38acf6 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h @@ -143,7 +143,7 @@ #define SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE 3 #define SUN4I_HDMI_DDC_CLK_REG 0x528 -#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0x7) << 3) +#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0xf) << 3) #define SUN4I_HDMI_DDC_CLK_N(n) ((n) & 0x7) #define SUN4I_HDMI_DDC_LINE_CTRL_REG 0x540 diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c index 4692e8c345ed43dba9c29e280be0042777d9efd5..58d9557a774fe43c09593ef64659efa6b40c25a9 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c @@ -32,7 +32,7 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate, unsigned long best_rate = 0; u8 best_m = 0, best_n = 0, _m, _n; - for (_m = 0; _m < 8; _m++) { + for (_m = 0; _m < 16; _m++) { for (_n = 0; _n < 8; _n++) { unsigned long tmp_rate; diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 298d6a8bab120752be3029144890d04fb77ab317..c9f1a8cd5f2ac29cc45f23be8f887d9a390db530 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -214,9 +214,8 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force) struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector); unsigned long reg; - if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg, - reg & SUN4I_HDMI_HPD_HIGH, - 0, 500000)) { + reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG); + if (!(reg & SUN4I_HDMI_HPD_HIGH)) { cec_phys_addr_invalidate(hdmi->cec_adap); return connector_status_disconnected; } diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index f9cde03030fd9042bea5a4528854b786cdd0451a..c2a9dcf6f4907f39bffce0408893034c16822469 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -615,8 +615,17 @@ EXPORT_SYMBOL(host1x_driver_register_full); */ void host1x_driver_unregister(struct host1x_driver *driver) { + struct host1x *host1x; + driver_unregister(&driver->driver); + mutex_lock(&devices_lock); + + list_for_each_entry(host1x, &devices, list) + host1x_detach_driver(host1x, driver); + + mutex_unlock(&devices_lock); + mutex_lock(&drivers_lock); list_del_init(&driver->list); mutex_unlock(&drivers_lock); diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h index 645f10d2a6e2e2f661a09a90aa488f58660bdb99..1a7df11bda16f3264c03d5e77352dfb25bc4b94e 100644 --- a/drivers/gpu/msm/adreno-gpulist.h +++ b/drivers/gpu/msm/adreno-gpulist.h @@ -466,7 +466,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .patchid = ANY_ID, .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_IOCOHERENT | - ADRENO_IFPC, + ADRENO_IFPC | ADRENO_PREEMPTION, .sqefw_name = "a630_sqe.fw", .zap_name = "a640_zap", .gpudev = &adreno_a6xx_gpudev, diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index c1ed012860a600029becf34a30a816c26d9bbc1b..3a47c03c9cd7d67173b3ac2331d66d3559277391 
100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -2420,6 +2420,14 @@ int adreno_reset(struct kgsl_device *device, int fault) } } if (ret) { + unsigned long flags = device->pwrctrl.ctrl_flags; + + /* + * Clear ctrl_flags to ensure clocks and regulators are + * turned off + */ + device->pwrctrl.ctrl_flags = 0; + /* If soft reset failed/skipped, then pull the power */ kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT); /* since device is officially off now clear start bit */ @@ -2437,6 +2445,8 @@ int adreno_reset(struct kgsl_device *device, int fault) break; } } + + device->pwrctrl.ctrl_flags = flags; } if (ret) return ret; diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index 0c9f9e7398110b6ae57ca07d08a5a8b96f8b3d8c..df601d4bb3d756c3c80a9e27ded18bffa3151903 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -1513,11 +1513,15 @@ static int a6xx_reset(struct kgsl_device *device, int fault) struct adreno_device *adreno_dev = ADRENO_DEVICE(device); int ret = -EINVAL; int i = 0; + unsigned long flags = device->pwrctrl.ctrl_flags; /* Use the regular reset sequence for No GMU */ if (!gmu_core_gpmu_isenabled(device)) return adreno_reset(device, fault); + /* Clear ctrl_flags to ensure clocks and regulators are turned off */ + device->pwrctrl.ctrl_flags = 0; + /* Transition from ACTIVE to RESET state */ kgsl_pwrctrl_change_state(device, KGSL_STATE_RESET); @@ -1539,6 +1543,8 @@ static int a6xx_reset(struct kgsl_device *device, int fault) if (i != 0) KGSL_DRV_WARN(device, "Device hard reset tried %d tries\n", i); + device->pwrctrl.ctrl_flags = flags; + /* * If active_cnt is non-zero then the system was active before * going into a reset - put it back in that state diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c index fdcf0e9e263f824e939119d9dfa82d267e52dc8b..8d3d331de3ef471f230b26005c20b72fe4bf666e 100644 --- a/drivers/gpu/msm/adreno_a6xx_gmu.c +++ b/drivers/gpu/msm/adreno_a6xx_gmu.c @@ -1144,6 +1144,39 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device) } #define A6XX_VBIF_XIN_HALT_CTRL1_ACKS (BIT(0) | BIT(1) | BIT(2) | BIT(3)) +static void do_gbif_halt(struct kgsl_device *device, u32 reg, u32 ack_reg, + u32 mask, const char *client) +{ + u32 ack; + unsigned long t; + + kgsl_regwrite(device, reg, mask); + + t = jiffies + msecs_to_jiffies(100); + do { + kgsl_regread(device, ack_reg, &ack); + if ((ack & mask) == mask) + return; + + /* + * If we are attempting recovery in case of stall-on-fault + * then the halt sequence will not complete as long as SMMU + * is stalled. 
+ */ + kgsl_mmu_pagefault_resume(&device->mmu); + + usleep_range(10, 100); + } while (!time_after(jiffies, t)); + + /* Check one last time */ + kgsl_mmu_pagefault_resume(&device->mmu); + + kgsl_regread(device, ack_reg, &ack); + if ((ack & mask) == mask) + return; + + dev_err(device->dev, "%s GBIF halt timed out\n", client); +} static void a6xx_llm_glm_handshake(struct kgsl_device *device) { @@ -1209,6 +1242,27 @@ static int a6xx_gmu_suspend(struct kgsl_device *device) gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 1); + if (adreno_has_gbif(adreno_dev)) { + struct adreno_gpudev *gpudev = + ADRENO_GPU_DEVICE(adreno_dev); + + /* Halt GX traffic */ + if (a6xx_gmu_gx_is_on(adreno_dev)) + do_gbif_halt(device, A6XX_RBBM_GBIF_HALT, + A6XX_RBBM_GBIF_HALT_ACK, + gpudev->gbif_gx_halt_mask, + "GX"); + /* Halt CX traffic */ + do_gbif_halt(device, A6XX_GBIF_HALT, A6XX_GBIF_HALT_ACK, + gpudev->gbif_arb_halt_mask, "CX"); + } + + if (a6xx_gmu_gx_is_on(adreno_dev)) + kgsl_regwrite(device, A6XX_RBBM_SW_RESET_CMD, 0x1); + + /* Allow the software reset to complete */ + udelay(100); + /* * This is based on the assumption that GMU is the only one controlling * the GX HS. This code path is the only client voting for GX through diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index 4da9aaa971f727064a23f9af4f83943148d79915..f8ef12a4640dc7041c76915e31210e90939fdd0d 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -854,8 +854,6 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, if (pt->name == KGSL_MMU_SECURE_PT) ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE]; - ctx->fault = 1; - if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE, &adreno_dev->ft_pf_policy) && (flags & IOMMU_FAULT_TRANSACTION_STALLED)) { @@ -948,6 +946,9 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT); KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val); + /* This is used by reset/recovery path */ + ctx->stalled_on_fault = true; + adreno_set_gpu_fault(adreno_dev, ADRENO_IOMMU_PAGE_FAULT); /* Go ahead with recovery*/ adreno_dispatcher_schedule(device); @@ -2076,7 +2077,7 @@ static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu) struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER]; unsigned int sctlr_val; - if (ctx->default_pt != NULL) { + if (ctx->default_pt != NULL && ctx->stalled_on_fault) { kgsl_iommu_enable_clk(mmu); KGSL_IOMMU_SET_CTX_REG(ctx, FSR, 0xffffffff); /* @@ -2093,6 +2094,7 @@ static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu) */ wmb(); kgsl_iommu_disable_clk(mmu); + ctx->stalled_on_fault = false; } } @@ -2100,36 +2102,31 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu) { struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu); struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER]; - unsigned int fsr_val; - - if (ctx->default_pt != NULL && ctx->fault) { - while (1) { - KGSL_IOMMU_SET_CTX_REG(ctx, FSR, 0xffffffff); - /* - * Make sure the above register write - * is not reordered across the barrier - * as we use writel_relaxed to write it. - */ - wmb(); - - /* - * Write 1 to RESUME.TnR to terminate the - * stalled transaction. 
- */ - KGSL_IOMMU_SET_CTX_REG(ctx, RESUME, 1); - /* - * Make sure the above register writes - * are not reordered across the barrier - * as we use writel_relaxed to write them - */ - wmb(); - - udelay(5); - fsr_val = KGSL_IOMMU_GET_CTX_REG(ctx, FSR); - if (!(fsr_val & (1 << KGSL_IOMMU_FSR_SS_SHIFT))) - break; - } - ctx->fault = 0; + + if (ctx->default_pt != NULL && ctx->stalled_on_fault) { + /* + * This will only clear fault bits in FSR. FSR.SS will still + * be set. Writing to RESUME (below) is the only way to clear + * FSR.SS bit. + */ + KGSL_IOMMU_SET_CTX_REG(ctx, FSR, 0xffffffff); + /* + * Make sure the above register write is not reordered across + * the barrier as we use writel_relaxed to write it. + */ + wmb(); + + /* + * Write 1 to RESUME.TnR to terminate the stalled transaction. + * This will also allow the SMMU to process new transactions. + */ + KGSL_IOMMU_SET_CTX_REG(ctx, RESUME, 1); + /* + * Make sure the above register writes are not reordered across + * the barrier as we use writel_relaxed to write them. + */ + wmb(); + } } diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h index 4660717b99b1fae1fe857f8397522a0badb01646..8ee77b3adb1872efa2c69a22dc04530c594d72d2 100644 --- a/drivers/gpu/msm/kgsl_iommu.h +++ b/drivers/gpu/msm/kgsl_iommu.h @@ -100,8 +100,8 @@ enum kgsl_iommu_context_id { * @cb_num: The hardware context bank number, used for calculating register * offsets. * @kgsldev: The kgsl device that uses this context. - * @fault: Flag when set indicates that this iommu device has caused a page - * fault + * @stalled_on_fault: Flag when set indicates that this iommu device is stalled + * on a page fault * @gpu_offset: Offset of this context bank in the GPU register space * @default_pt: The default pagetable for this context, * it may be changed by self programming. @@ -112,7 +112,7 @@ struct kgsl_iommu_context { enum kgsl_iommu_context_id id; unsigned int cb_num; struct kgsl_device *kgsldev; - int fault; + bool stalled_on_fault; void __iomem *regbase; unsigned int gpu_offset; struct kgsl_pagetable *default_pt; diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 3cd153c6d271a5f451c3a6e6ea7893060e4441c5..f188c85b3b7abc3a54d6eaa8f9983ce66d157faf 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -92,7 +92,8 @@ * struct vga_switcheroo_client - registered client * @pdev: client pci device * @fb_info: framebuffer to which console is remapped on switching - * @pwr_state: current power state + * @pwr_state: current power state if manual power control is used. + * For driver power control, call vga_switcheroo_pwr_state(). * @ops: client callbacks * @id: client identifier. Determining the id requires the handler, * so gpus are initially assigned VGA_SWITCHEROO_UNKNOWN_ID @@ -104,8 +105,7 @@ * @list: client list * * Registered client. A client can be either a GPU or an audio device on a GPU. - * For audio clients, the @fb_info, @active and @driver_power_control members - * are bogus. + * For audio clients, the @fb_info and @active members are bogus. */ struct vga_switcheroo_client { struct pci_dev *pdev; @@ -331,8 +331,8 @@ EXPORT_SYMBOL(vga_switcheroo_register_client); * @ops: client callbacks * @id: client identifier * - * Register audio client (audio device on a GPU). The power state of the - * client is assumed to be ON. Beforehand, vga_switcheroo_client_probe_defer() + * Register audio client (audio device on a GPU). The client is assumed + * to use runtime PM. 
Beforehand, vga_switcheroo_client_probe_defer() * shall be called to ensure that all prerequisites are met. * * Return: 0 on success, -ENOMEM on memory allocation error. @@ -341,7 +341,7 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, enum vga_switcheroo_client_id id) { - return register_client(pdev, ops, id | ID_BIT_AUDIO, false, false); + return register_client(pdev, ops, id | ID_BIT_AUDIO, false, true); } EXPORT_SYMBOL(vga_switcheroo_register_audio_client); @@ -406,6 +406,19 @@ bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) } EXPORT_SYMBOL(vga_switcheroo_client_probe_defer); +static enum vga_switcheroo_state +vga_switcheroo_pwr_state(struct vga_switcheroo_client *client) +{ + if (client->driver_power_control) + if (pm_runtime_enabled(&client->pdev->dev) && + pm_runtime_active(&client->pdev->dev)) + return VGA_SWITCHEROO_ON; + else + return VGA_SWITCHEROO_OFF; + else + return client->pwr_state; +} + /** * vga_switcheroo_get_client_state() - obtain power state of a given client * @pdev: client pci device @@ -425,7 +438,7 @@ enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *pdev) if (!client) ret = VGA_SWITCHEROO_NOT_FOUND; else - ret = client->pwr_state; + ret = vga_switcheroo_pwr_state(client); mutex_unlock(&vgasr_mutex); return ret; } @@ -598,7 +611,7 @@ static int vga_switcheroo_show(struct seq_file *m, void *v) client_is_vga(client) ? "" : "-Audio", client->active ? '+' : ' ', client->driver_power_control ? "Dyn" : "", - client->pwr_state ? "Pwr" : "Off", + vga_switcheroo_pwr_state(client) ? "Pwr" : "Off", pci_name(client->pdev)); i++; } @@ -641,10 +654,8 @@ static void set_audio_state(enum vga_switcheroo_client_id id, struct vga_switcheroo_client *client; client = find_client_from_id(&vgasr_priv.clients, id | ID_BIT_AUDIO); - if (client && client->pwr_state != state) { + if (client) client->ops->set_gpu_state(client->pdev, state); - client->pwr_state = state; - } } /* stage one happens before delay */ @@ -656,7 +667,7 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) if (!active) return 0; - if (new_client->pwr_state == VGA_SWITCHEROO_OFF) + if (vga_switcheroo_pwr_state(new_client) == VGA_SWITCHEROO_OFF) vga_switchon(new_client); vga_set_default_device(new_client->pdev); @@ -695,7 +706,7 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) if (new_client->ops->reprobe) new_client->ops->reprobe(new_client->pdev); - if (active->pwr_state == VGA_SWITCHEROO_ON) + if (vga_switcheroo_pwr_state(active) == VGA_SWITCHEROO_ON) vga_switchoff(active); set_audio_state(new_client->id, VGA_SWITCHEROO_ON); @@ -939,11 +950,6 @@ EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch); * Specifying nouveau.runpm=0, radeon.runpm=0 or amdgpu.runpm=0 on the kernel * command line disables it. * - * When the driver decides to power up or down, it notifies vga_switcheroo - * thereof so that it can (a) power the audio device on the GPU up or down, - * and (b) update its internal power state representation for the device. - * This is achieved by vga_switcheroo_set_dynamic_switch(). - * * After the GPU has been suspended, the handler needs to be called to cut * power to the GPU. Likewise it needs to reinstate power before the GPU * can resume. This is achieved by vga_switcheroo_init_domain_pm_ops(), @@ -951,8 +957,9 @@ EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch); * calls to the handler. * * When the audio device resumes, the GPU needs to be woken. 
This is achieved - * by vga_switcheroo_init_domain_pm_optimus_hdmi_audio(), which augments the - * audio device's resume function. + * by a PCI quirk which calls device_link_add() to declare a dependency on the + * GPU. That way, the GPU is kept awake whenever and as long as the audio + * device is in use. * * On muxed machines, if the mux is initially switched to the discrete GPU, * the user ends up with a black screen when the GPU powers down after boot. @@ -978,35 +985,6 @@ static void vga_switcheroo_power_switch(struct pci_dev *pdev, vgasr_priv.handler->power_state(client->id, state); } -/** - * vga_switcheroo_set_dynamic_switch() - helper for driver power control - * @pdev: client pci device - * @dynamic: new power state - * - * Helper for GPUs whose power state is controlled by the driver's runtime pm. - * When the driver decides to power up or down, it notifies vga_switcheroo - * thereof using this helper so that it can (a) power the audio device on - * the GPU up or down, and (b) update its internal power state representation - * for the device. - */ -void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, - enum vga_switcheroo_state dynamic) -{ - struct vga_switcheroo_client *client; - - mutex_lock(&vgasr_mutex); - client = find_client_from_pci(&vgasr_priv.clients, pdev); - if (!client || !client->driver_power_control) { - mutex_unlock(&vgasr_mutex); - return; - } - - client->pwr_state = dynamic; - set_audio_state(client->id, dynamic); - mutex_unlock(&vgasr_mutex); -} -EXPORT_SYMBOL(vga_switcheroo_set_dynamic_switch); - /* switcheroo power domain */ static int vga_switcheroo_runtime_suspend(struct device *dev) { @@ -1076,69 +1054,3 @@ void vga_switcheroo_fini_domain_pm_ops(struct device *dev) dev_pm_domain_set(dev, NULL); } EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops); - -static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct vga_switcheroo_client *client; - struct device *video_dev = NULL; - int ret; - - /* we need to check if we have to switch back on the video - * device so the audio device can come back - */ - mutex_lock(&vgasr_mutex); - list_for_each_entry(client, &vgasr_priv.clients, list) { - if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && - client_is_vga(client)) { - video_dev = &client->pdev->dev; - break; - } - } - mutex_unlock(&vgasr_mutex); - - if (video_dev) { - ret = pm_runtime_get_sync(video_dev); - if (ret && ret != 1) - return ret; - } - ret = dev->bus->pm->runtime_resume(dev); - - /* put the reference for the gpu */ - if (video_dev) { - pm_runtime_mark_last_busy(video_dev); - pm_runtime_put_autosuspend(video_dev); - } - return ret; -} - -/** - * vga_switcheroo_init_domain_pm_optimus_hdmi_audio() - helper for driver - * power control - * @dev: audio client device - * @domain: power domain - * - * Helper for GPUs whose power state is controlled by the driver's runtime pm. - * When the audio device resumes, the GPU needs to be woken. This helper - * augments the audio device's resume function to do that. - * - * Return: 0 on success, -EINVAL if no power management operations are - * defined for this device. 
- */ -int -vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, - struct dev_pm_domain *domain) -{ - /* copy over all the bus versions */ - if (dev->bus && dev->bus->pm) { - domain->ops = *dev->bus->pm; - domain->ops.runtime_resume = - vga_switcheroo_runtime_resume_hdmi_audio; - - dev_pm_domain_set(dev, domain); - return 0; - } - dev_pm_domain_set(dev, NULL); - return -EINVAL; -} -EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_optimus_hdmi_audio); diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 8ab8f2350bbcdc7f28cccfc15d195807c503c32f..b58ab769aa7b3e6474c888c50ebeed3abe025afc 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -57,6 +57,7 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\") struct apple_sc { unsigned long quirks; unsigned int fn_on; + unsigned int fn_found; DECLARE_BITMAP(pressed_numlock, KEY_CNT); }; @@ -342,12 +343,15 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { + struct apple_sc *asc = hid_get_drvdata(hdev); + if (usage->hid == (HID_UP_CUSTOM | 0x0003) || usage->hid == (HID_UP_MSVENDOR | 0x0003) || usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) { /* The fn key on Apple USB keyboards */ set_bit(EV_REP, hi->input->evbit); hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); + asc->fn_found = true; apple_setup_input(hi->input); return 1; } @@ -374,6 +378,19 @@ static int apple_input_mapped(struct hid_device *hdev, struct hid_input *hi, return 0; } +static int apple_input_configured(struct hid_device *hdev, + struct hid_input *hidinput) +{ + struct apple_sc *asc = hid_get_drvdata(hdev); + + if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) { + hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n"); + asc->quirks = 0; + } + + return 0; +} + static int apple_probe(struct hid_device *hdev, const struct hid_device_id *id) { @@ -588,6 +605,7 @@ static struct hid_driver apple_driver = { .event = apple_event, .input_mapping = apple_input_mapping, .input_mapped = apple_input_mapped, + .input_configured = apple_input_configured, }; module_hid_driver(apple_driver); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 1f2e9c9279e86d85710b35130c6ee4fcf5270ddc..b8ada634c977067aa1cc26eca49801366c03d6e1 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2456,7 +2456,6 @@ static const struct hid_device_id hid_have_special_driver[] = { #if IS_ENABLED(CONFIG_HID_STEAM) { HID_USB_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER_BT) }, #endif #if IS_ENABLED(CONFIG_HID_WALTOP) { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index a3e5c4400091fb2dd0f3033295051244eeb32738..fb7422004360b2f05d4bc56ddbef8596de67a29b 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -369,6 +369,7 @@ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002 0xc002 #define USB_VENDOR_ID_ELAN 0x04f3 @@ -1002,7 +1003,6 @@ #define USB_VENDOR_ID_VALVE 0x28de #define USB_DEVICE_ID_STEAM_CONTROLLER 0x1102 
#define USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS 0x1142 -#define USB_DEVICE_ID_STEAM_CONTROLLER_BT 0x1106 #define USB_VENDOR_ID_STEELSERIES 0x1038 #define USB_DEVICE_ID_STEELSERIES_SRWS1 0x1410 diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 42ed887ba0be5d79d67738bcc1520eb8fb8a0190..78e37bb25aeedf8dbb629aba1dd8c36eb919a6d1 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -452,6 +452,12 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd __set_bit(MSC_RAW, input->mscbit); } + /* + * hid-input may mark device as using autorepeat, but neither + * the trackpad, nor the mouse actually want it. + */ + __clear_bit(EV_REP, input->evbit); + return 0; } diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 07d92d4a9f7c8ca4a6bb4b93f4a08237d220aae1..db29bf539a4b2619bfc77e38dbccb0cfc7e9e044 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1550,6 +1550,9 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, + { .driver_data = MT_CLS_EGALAX, + MT_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) }, /* Elitegroup panel */ { .driver_data = MT_CLS_SERIAL, diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index c8b07a182c0bfadb97d678deae6d6690465acd5f..f8fb8a61fb90e1b365515e7b69f25ba2a535c70d 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -837,6 +837,23 @@ static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc, if (sc->quirks & PS3REMOTE) return ps3remote_fixup(hdev, rdesc, rsize); + /* + * Some knock-off USB dongles incorrectly report their button count + * as 13 instead of 16 causing three non-functional buttons. 
+ */ + if ((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize >= 45 && + /* Report Count (13) */ + rdesc[23] == 0x95 && rdesc[24] == 0x0D && + /* Usage Maximum (13) */ + rdesc[37] == 0x29 && rdesc[38] == 0x0D && + /* Report Count (3) */ + rdesc[43] == 0x95 && rdesc[44] == 0x03) { + hid_info(hdev, "Fixing up USB dongle report descriptor\n"); + rdesc[24] = 0x10; + rdesc[38] = 0x10; + rdesc[44] = 0x00; + } + return rdesc; } diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 44e1eefc5b24faa9a0f0773e10a39924b6024fbe..a4a6c90c8134fd91c0b6e8a23cc92030fb6aab86 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -768,8 +768,12 @@ static int steam_probe(struct hid_device *hdev, if (steam->quirks & STEAM_QUIRK_WIRELESS) { hid_info(hdev, "Steam wireless receiver connected"); + /* If using a wireless adaptor ask for connection status */ + steam->connected = false; steam_request_conn_status(steam); } else { + /* A wired connection is always present */ + steam->connected = true; ret = steam_register(steam); if (ret) { hid_err(hdev, diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c index 95052373a828240809c25581b944f1feb2587944..f98c1e1b1dbdc1c5538a52cbeea2e0c8a5733709 100644 --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c @@ -373,6 +373,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { }, .driver_data = (void *)&sipodev_desc }, + { + .ident = "Mediacom FlexBook edge 13", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook_edge13-M-FBE13"), + }, + .driver_data = (void *)&sipodev_desc + }, { .ident = "Odys Winbook 13", .matches = { @@ -381,6 +389,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { }, .driver_data = (void *)&sipodev_desc }, + { + .ident = "Schneider SCL142ALM", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SCHNEIDER"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SCL142ALM"), + }, + .driver_data = (void *)&sipodev_desc + }, { } /* Terminate list */ }; diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 045b5da9b992873ce74f5d401de34855b93a6f8e..98916fb4191a75b368dafa19689366505469f3b4 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -680,16 +680,21 @@ static int usbhid_open(struct hid_device *hid) struct usbhid_device *usbhid = hid->driver_data; int res; + mutex_lock(&usbhid->mutex); + set_bit(HID_OPENED, &usbhid->iofl); - if (hid->quirks & HID_QUIRK_ALWAYS_POLL) - return 0; + if (hid->quirks & HID_QUIRK_ALWAYS_POLL) { + res = 0; + goto Done; + } res = usb_autopm_get_interface(usbhid->intf); /* the device must be awake to reliably request remote wakeup */ if (res < 0) { clear_bit(HID_OPENED, &usbhid->iofl); - return -EIO; + res = -EIO; + goto Done; } usbhid->intf->needs_remote_wakeup = 1; @@ -723,6 +728,9 @@ static int usbhid_open(struct hid_device *hid) msleep(50); clear_bit(HID_RESUME_RUNNING, &usbhid->iofl); + + Done: + mutex_unlock(&usbhid->mutex); return res; } @@ -730,6 +738,8 @@ static void usbhid_close(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; + mutex_lock(&usbhid->mutex); + /* * Make sure we don't restart data acquisition due to * a resumption we no longer care about by avoiding racing @@ -741,12 +751,13 @@ static void usbhid_close(struct hid_device *hid) clear_bit(HID_IN_POLLING, &usbhid->iofl); spin_unlock_irq(&usbhid->lock); - if (hid->quirks & 
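/*
 * Decoding the hid-sony report-descriptor fixup above (HID short items:
 * 0x95 = Report Count, 0x29 = Usage Maximum, each followed by a one-byte
 * value). For the affected knock-off dongles the descriptor is patched
 * in place as follows (illustration only):
 *
 *   rdesc[23..24]  0x95 0x0D -> 0x95 0x10   Report Count 13 -> 16 buttons
 *   rdesc[37..38]  0x29 0x0D -> 0x29 0x10   Usage Maximum 13 -> 16
 *   rdesc[43..44]  0x95 0x03 -> 0x95 0x00   Report Count 3 -> 0, dropping
 *                                           what appear to be the filler
 *                                           bits that padded 13 buttons
 *                                           to a 16-bit boundary
 */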
HID_QUIRK_ALWAYS_POLL) - return; + if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) { + hid_cancel_delayed_stuff(usbhid); + usb_kill_urb(usbhid->urbin); + usbhid->intf->needs_remote_wakeup = 0; + } - hid_cancel_delayed_stuff(usbhid); - usb_kill_urb(usbhid->urbin); - usbhid->intf->needs_remote_wakeup = 0; + mutex_unlock(&usbhid->mutex); } /* @@ -1056,6 +1067,8 @@ static int usbhid_start(struct hid_device *hid) unsigned int n, insize = 0; int ret; + mutex_lock(&usbhid->mutex); + clear_bit(HID_DISCONNECTED, &usbhid->iofl); usbhid->bufsize = HID_MIN_BUFFER_SIZE; @@ -1170,6 +1183,8 @@ static int usbhid_start(struct hid_device *hid) usbhid_set_leds(hid); device_set_wakeup_enable(&dev->dev, 1); } + + mutex_unlock(&usbhid->mutex); return 0; fail: @@ -1180,6 +1195,7 @@ static int usbhid_start(struct hid_device *hid) usbhid->urbout = NULL; usbhid->urbctrl = NULL; hid_free_buffers(dev, hid); + mutex_unlock(&usbhid->mutex); return ret; } @@ -1195,6 +1211,8 @@ static void usbhid_stop(struct hid_device *hid) usbhid->intf->needs_remote_wakeup = 0; } + mutex_lock(&usbhid->mutex); + clear_bit(HID_STARTED, &usbhid->iofl); spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ set_bit(HID_DISCONNECTED, &usbhid->iofl); @@ -1215,6 +1233,8 @@ static void usbhid_stop(struct hid_device *hid) usbhid->urbout = NULL; hid_free_buffers(hid_to_usb_dev(hid), hid); + + mutex_unlock(&usbhid->mutex); } static int usbhid_power(struct hid_device *hid, int lvl) @@ -1375,6 +1395,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id * INIT_WORK(&usbhid->reset_work, hid_reset); setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid); spin_lock_init(&usbhid->lock); + mutex_init(&usbhid->mutex); ret = hid_add_device(hid); if (ret) { diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h index da9c61d54be6c9d9018431f43a44a761e070c247..caa0ee63958153a684fb4183832c3faa24b96218 100644 --- a/drivers/hid/usbhid/usbhid.h +++ b/drivers/hid/usbhid/usbhid.h @@ -93,6 +93,7 @@ struct usbhid_device { dma_addr_t outbuf_dma; /* Output buffer dma */ unsigned long last_out; /* record of last output for timeouts */ + struct mutex mutex; /* start/stop/open/close */ spinlock_t lock; /* fifo spinlock */ unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */ struct timer_list io_retry; /* Retry timer */ diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index c4d4464c7b21d1a4d6b82490cbc0a64316d69fb4..bf8e727988c41b8ecbbbe35c275e8c69255081df 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -132,9 +132,11 @@ static void wacom_feature_mapping(struct hid_device *hdev, data[0] = field->report->id; ret = wacom_get_report(hdev, HID_FEATURE_REPORT, data, n, WAC_CMD_RETRIES); - if (ret == n) { + if (ret == n && features->type == HID_GENERIC) { ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, data, n, 0); + } else if (ret == 2 && features->type != HID_GENERIC) { + features->touch_max = data[1]; } else { features->touch_max = 16; hid_warn(hdev, "wacom_feature_mapping: " diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index ba3af4505d8fb1a4632f5a340ce3c97544492b3e..e40d8907853bf35ddcf3fa19b2b003e888afe30e 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -895,7 +895,7 @@ static int acpi_power_meter_add(struct acpi_device *device) res = setup_attrs(resource); if (res) - goto exit_free; + goto exit_free_capability; resource->hwmon_dev = hwmon_device_register(&device->dev); if 
(IS_ERR(resource->hwmon_dev)) { @@ -908,6 +908,8 @@ static int acpi_power_meter_add(struct acpi_device *device) exit_remove: remove_attrs(resource); +exit_free_capability: + free_capabilities(resource); exit_free: kfree(resource); exit: diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c index 69b97d45e3cbb459ccdf73321dc4c3ef8ae23488..e4337e9dda4447f14108b57cce00f5f0059d95ef 100644 --- a/drivers/hwmon/aspeed-pwm-tacho.c +++ b/drivers/hwmon/aspeed-pwm-tacho.c @@ -878,6 +878,8 @@ static int aspeed_create_fan(struct device *dev, ret = of_property_read_u32(child, "reg", &pwm_port); if (ret) return ret; + if (pwm_port >= ARRAY_SIZE(pwm_port_params)) + return -EINVAL; aspeed_create_pwm_port(priv, (u8)pwm_port); ret = of_property_count_u8_elems(child, "cooling-levels"); diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c index a973eb6a28908be2c17092ad4a1169e288a6f8e8..9e44d2385e6f906fc0a261e07487edb70752ee8a 100644 --- a/drivers/hwmon/da9052-hwmon.c +++ b/drivers/hwmon/da9052-hwmon.c @@ -250,9 +250,9 @@ static ssize_t da9052_read_tsi(struct device *dev, int channel = to_sensor_dev_attr(devattr)->index; int ret; - mutex_lock(&hwmon->hwmon_lock); + mutex_lock(&hwmon->da9052->auxadc_lock); ret = __da9052_read_tsi(dev, channel); - mutex_unlock(&hwmon->hwmon_lock); + mutex_unlock(&hwmon->da9052->auxadc_lock); if (ret < 0) return ret; diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c index 1ed9a7aa953dbb1560ae1fb626b2d529f6d2ecc9..f4985622b179fc5c1b18b3c9b40afcdf2de0107b 100644 --- a/drivers/hwmon/emc2103.c +++ b/drivers/hwmon/emc2103.c @@ -454,7 +454,7 @@ static ssize_t pwm1_enable_store(struct device *dev, } result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg); - if (result) { + if (result < 0) { count = result; goto err; } diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c index 221fd149205760371a0c128710c5773588ad1dc1..6df28fe0577dac0f42732c7f82d7cd7ea4425da2 100644 --- a/drivers/hwmon/max6697.c +++ b/drivers/hwmon/max6697.c @@ -47,8 +47,9 @@ static const u8 MAX6697_REG_CRIT[] = { * Map device tree / platform data register bit map to chip bit map. * Applies to alert register and over-temperature register. 
*/ -#define MAX6697_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ +#define MAX6697_ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ (((reg) & 0x01) << 6) | ((reg) & 0x80)) +#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7)) #define MAX6697_REG_STAT(n) (0x44 + (n)) @@ -587,12 +588,12 @@ static int max6697_init_chip(struct max6697_data *data, return ret; ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK, - MAX6697_MAP_BITS(pdata->alert_mask)); + MAX6697_ALERT_MAP_BITS(pdata->alert_mask)); if (ret < 0) return ret; ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK, - MAX6697_MAP_BITS(pdata->over_temperature_mask)); + MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask)); if (ret < 0) return ret; diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index 96e50fd70f2c7ec34f8d794e01bdea505d217407..9cbb4f11f9e3d40a6632650720b4bbfa3e418ac0 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -187,7 +187,7 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata) { int ret = 0; - if (!drvdata->enable) + if (!drvdata->enable || !drvdata->csdev->enable) return -EPERM; switch (drvdata->config_type) { @@ -212,6 +212,9 @@ static int tmc_read_unprepare(struct tmc_drvdata *drvdata) { int ret = 0; + if (!drvdata->csdev->enable) + return -EPERM; + switch (drvdata->config_type) { case TMC_CONFIG_TYPE_ETB: case TMC_CONFIG_TYPE_ETF: diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 0c05b8236500d20cb22b8b0f8efed828dfcbaff6..632bce44cff21316d490d9f866cd8896a09d49bb 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -1398,6 +1398,8 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) } for (i = 0; i < csdev->nr_outport; i++) { + if (desc->pdata->child_names[i] == NULL) + continue; conns[i].outport = desc->pdata->outports[i]; conns[i].child_name = desc->pdata->child_names[i]; conns[i].child_port = desc->pdata->child_ports[i]; diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c index 3374394a4df40ffdf458266128393de278ef6957..be1ce3d943e091920753606a5ce34a1731228ccb 100644 --- a/drivers/hwtracing/coresight/of_coresight.c +++ b/drivers/hwtracing/coresight/of_coresight.c @@ -58,6 +58,7 @@ static void of_coresight_get_ports(const struct device_node *node, struct device_node *ep = NULL; int in = 0, out = 0; struct device_node *ports = NULL, *port = NULL; + struct of_endpoint endpoint; ports = of_get_child_by_name(node, "ports"); port = of_get_child_by_name(node, "port"); @@ -70,10 +71,15 @@ static void of_coresight_get_ports(const struct device_node *node, if (!ep) break; + if (of_graph_parse_endpoint(ep, &endpoint)) + continue; + if (of_property_read_bool(ep, "slave-mode")) - in++; + in = (endpoint.port + 1 > in) ? + endpoint.port + 1 : in; else - out++; + out = (endpoint.port + 1) > out ? 
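/*
 * Worked example for the two max6697 remapping macros above, using the
 * platform-data value 0x03 (bits 0 and 1 set) purely to illustrate the
 * bit arithmetic (not part of the patch):
 *
 *   MAX6697_ALERT_MAP_BITS(0x03) = (0x02 >> 1) | (0x01 << 6) | 0x00
 *                                = 0x41  (bit 0 moves to bit 6)
 *   MAX6697_OVERT_MAP_BITS(0x03) = (0x03 >> 1) | (0x01 << 7)
 *                                = 0x81  (bit 0 moves to bit 7)
 *
 * The alert and over-temperature registers place the remapped bit in
 * different positions, which is why a single MAX6697_MAP_BITS() macro no
 * longer suits both writes.
 */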
+ endpoint.port + 1 : out; } while (ep); diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index b8cbd26b60e188b9db7ff7f81b6c527aaba4d360..99ef61de9b1e7290968465fc2cb36a35658b49db 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -213,11 +213,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Tiger Lake PCH-H */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x43a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { /* Jasper Lake PCH */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Jasper Lake CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4e29), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { /* Elkhart Lake CPU */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529), @@ -228,6 +238,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Emmitsburg PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1bcc), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { 0 }, }; diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index e370804ec8bc62d10830eadba8ac93f7af50aedd..3a9db4626cb60e21f4050dfd6c482fd792fda917 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c @@ -326,7 +326,8 @@ static int pca_xfer(struct i2c_adapter *i2c_adap, DEB2("BUS ERROR - SDA Stuck low\n"); pca_reset(adap); goto out; - case 0x90: /* Bus error - SCL stuck low */ + case 0x78: /* Bus error - SCL stuck low (PCA9665) */ + case 0x90: /* Bus error - SCL stuck low (PCA9564) */ DEB2("BUS ERROR - SCL Stuck low\n"); pca_reset(adap); goto out; diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c index 8915ee30a5b44ee3dc7ad7019d7dc0c818c1daf1..1d59eede537b105431bff834d403c244f9a3cfd3 100644 --- a/drivers/i2c/busses/i2c-altera.c +++ b/drivers/i2c/busses/i2c-altera.c @@ -81,6 +81,7 @@ * @isr_mask: cached copy of local ISR enables. * @isr_status: cached copy of local ISR status. * @lock: spinlock for IRQ synchronization. + * @isr_mutex: mutex for IRQ thread. 
*/ struct altr_i2c_dev { void __iomem *base; @@ -97,6 +98,7 @@ struct altr_i2c_dev { u32 isr_mask; u32 isr_status; spinlock_t lock; /* IRQ synchronization */ + struct mutex isr_mutex; }; static void @@ -256,10 +258,11 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev) struct altr_i2c_dev *idev = _dev; u32 status = idev->isr_status; + mutex_lock(&idev->isr_mutex); if (!idev->msg) { dev_warn(idev->dev, "unexpected interrupt\n"); altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ); - return IRQ_HANDLED; + goto out; } read = (idev->msg->flags & I2C_M_RD) != 0; @@ -312,6 +315,8 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev) complete(&idev->msg_complete); dev_dbg(idev->dev, "Message Complete\n"); } +out: + mutex_unlock(&idev->isr_mutex); return IRQ_HANDLED; } @@ -323,6 +328,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg) u32 value; u8 addr = i2c_8bit_addr_from_msg(msg); + mutex_lock(&idev->isr_mutex); idev->msg = msg; idev->msg_len = msg->len; idev->buf = msg->buf; @@ -347,6 +353,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg) altr_i2c_int_enable(idev, imask, true); altr_i2c_fill_tx_fifo(idev); } + mutex_unlock(&idev->isr_mutex); time_left = wait_for_completion_timeout(&idev->msg_complete, ALTR_I2C_XFER_TIMEOUT); @@ -420,6 +427,7 @@ static int altr_i2c_probe(struct platform_device *pdev) idev->dev = &pdev->dev; init_completion(&idev->msg_complete); spin_lock_init(&idev->lock); + mutex_init(&idev->isr_mutex); ret = device_property_read_u32(idev->dev, "fifo-size", &idev->fifo_size); diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index bdeab0174fec2099b4572bbe9dea6bd9d32514db..0b6567d1aa390c9df0c8380df59bec36dbc7385c 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -189,6 +189,7 @@ static const struct pci_device_id pch_pcidev_id[] = { { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, }, {0,} }; +MODULE_DEVICE_TABLE(pci, pch_pcidev_id); static irqreturn_t pch_i2c_handler(int irq, void *pData); diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 4b81dc231b18f8fe34ce95261e611d67c1ca40dc..5345b731bb7cc7a40d5d8bcda0963bfe845d7746 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -960,7 +960,8 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) } if (dev->vendor == PCI_VENDOR_ID_AMD && - dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) { + (dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS || + dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS)) { retval = piix4_setup_sb800(dev, id, 1); } diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 600d264e080c5f05031cbeb7c8f6ac2907f25ba4..e300f9530f190883b7e73f7af293a5835b5e518c 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -315,11 +315,10 @@ static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why) dev_err(dev, "IBMR: %08x IDBR: %08x ICR: %08x ISR: %08x\n", readl(_IBMR(i2c)), readl(_IDBR(i2c)), readl(_ICR(i2c)), readl(_ISR(i2c))); - dev_dbg(dev, "log: "); + dev_err(dev, "log:"); for (i = 0; i < i2c->irqlogidx; i++) - pr_debug("[%08x:%08x] ", i2c->isrlog[i], i2c->icrlog[i]); - - pr_debug("\n"); + pr_cont(" [%03x:%05x]", i2c->isrlog[i], i2c->icrlog[i]); + pr_cont("\n"); } #else /* ifdef DEBUG */ @@ -709,11 +708,9 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c) { u32 icr; - /* - * Clear the STOP and ACK flags - */ + /* Clear the START, STOP, ACK, TB 
and MA flags */ icr = readl(_ICR(i2c)); - icr &= ~(ICR_STOP | ICR_ACKNAK); + icr &= ~(ICR_START | ICR_STOP | ICR_ACKNAK | ICR_TB | ICR_MA); writel(icr, _ICR(i2c)); } diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 132c4a405bf83f622ebee5ea9ab7e3965e779017..db9ca8e926ca77aa70f129b5c729b2cea096dac8 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -817,6 +817,7 @@ static int rcar_unreg_slave(struct i2c_client *slave) /* disable irqs and ensure none is running before clearing ptr */ rcar_i2c_write(priv, ICSIER, 0); rcar_i2c_write(priv, ICSCR, 0); + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ synchronize_irq(priv->irq); priv->slave = NULL; @@ -914,6 +915,8 @@ static int rcar_i2c_probe(struct platform_device *pdev) if (ret < 0) goto out_pm_put; + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ + if (priv->devtype == I2C_RCAR_GEN3) { priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (!IS_ERR(priv->rstc)) { diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index eaa312bc3a3ce91508cb9d03db7155467b0797b6..c4066276eb7b97e2c59620bf3a5d54c679977913 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -47,7 +47,7 @@ struct i2c_dev { struct list_head list; struct i2c_adapter *adap; - struct device *dev; + struct device dev; struct cdev cdev; }; @@ -91,12 +91,14 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap) return i2c_dev; } -static void put_i2c_dev(struct i2c_dev *i2c_dev) +static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev) { spin_lock(&i2c_dev_list_lock); list_del(&i2c_dev->list); spin_unlock(&i2c_dev_list_lock); - kfree(i2c_dev); + if (del_cdev) + cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev); + put_device(&i2c_dev->dev); } static ssize_t name_show(struct device *dev, @@ -542,6 +544,14 @@ static const struct file_operations i2cdev_fops = { static struct class *i2c_dev_class; +static void i2cdev_dev_release(struct device *dev) +{ + struct i2c_dev *i2c_dev; + + i2c_dev = container_of(dev, struct i2c_dev, dev); + kfree(i2c_dev); +} + static int i2cdev_attach_adapter(struct device *dev, void *dummy) { struct i2c_adapter *adap; @@ -558,27 +568,23 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy) cdev_init(&i2c_dev->cdev, &i2cdev_fops); i2c_dev->cdev.owner = THIS_MODULE; - res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1); - if (res) - goto error_cdev; - - /* register this i2c device with the driver core */ - i2c_dev->dev = device_create(i2c_dev_class, &adap->dev, - MKDEV(I2C_MAJOR, adap->nr), NULL, - "i2c-%d", adap->nr); - if (IS_ERR(i2c_dev->dev)) { - res = PTR_ERR(i2c_dev->dev); - goto error; + + device_initialize(&i2c_dev->dev); + i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr); + i2c_dev->dev.class = i2c_dev_class; + i2c_dev->dev.parent = &adap->dev; + i2c_dev->dev.release = i2cdev_dev_release; + dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr); + + res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev); + if (res) { + put_i2c_dev(i2c_dev, false); + return res; } pr_debug("i2c-dev: adapter [%s] registered as minor %d\n", adap->name, adap->nr); return 0; -error: - cdev_del(&i2c_dev->cdev); -error_cdev: - put_i2c_dev(i2c_dev); - return res; } static int i2cdev_detach_adapter(struct device *dev, void *dummy) @@ -594,9 +600,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy) if (!i2c_dev) /* attach_adapter must have failed */ return 0; - cdev_del(&i2c_dev->cdev); - 
put_i2c_dev(i2c_dev); - device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); + put_i2c_dev(i2c_dev, true); pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name); return 0; diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 33ce032cb70112e9a3304f789198fbb75602a47c..0c637ae81404d22264b61800384d4dd29d74a71f 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -270,6 +270,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) err_rollback_available: device_remove_file(&pdev->dev, &dev_attr_available_masters); err_rollback: + i2c_demux_deactivate_master(priv); for (j = 0; j < i; j++) { of_node_put(priv->chan[j].parent_np); of_changeset_destroy(&priv->chan[j].chgset); diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index eb6e3dc789b274a63f0cfcf9e02f6368510f6e90..49263428c4cf0f65c0dd6494583d4892584b9876 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1583,10 +1583,13 @@ static int mma8452_probe(struct i2c_client *client, ret = mma8452_set_freefall_mode(data, false); if (ret < 0) - goto buffer_cleanup; + goto unregister_device; return 0; +unregister_device: + iio_device_unregister(indio_dev); + buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c index f2761b3855411d6db48ef93291c7d4ac256ac93f..edceda640fa2f14cc3702ce20ef1f268dbf423d3 100644 --- a/drivers/iio/accel/sca3000.c +++ b/drivers/iio/accel/sca3000.c @@ -982,7 +982,7 @@ static int sca3000_read_data(struct sca3000_state *st, st->tx[0] = SCA3000_READ_REG(reg_address_high); ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer)); if (ret) { - dev_err(get_device(&st->us->dev), "problem reading register"); + dev_err(&st->us->dev, "problem reading register\n"); return ret; } diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 3cfb2d4b24412f63351f39ef931669600b72ca74..206feefbc4566fe66c717387006ef04ed2e9798f 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -1627,15 +1627,27 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev) return 0; } -static int stm32_adc_dma_request(struct iio_dev *indio_dev) +static int stm32_adc_dma_request(struct device *dev, struct iio_dev *indio_dev) { struct stm32_adc *adc = iio_priv(indio_dev); struct dma_slave_config config; int ret; - adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx"); - if (!adc->dma_chan) + adc->dma_chan = dma_request_chan(dev, "rx"); + if (IS_ERR(adc->dma_chan)) { + ret = PTR_ERR(adc->dma_chan); + if (ret != -ENODEV) { + if (ret != -EPROBE_DEFER) + dev_err(dev, + "DMA channel request failed with %d\n", + ret); + return ret; + } + + /* DMA is optional: fall back to IRQ mode */ + adc->dma_chan = NULL; return 0; + } adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev, STM32_DMA_BUFFER_SIZE, @@ -1749,7 +1761,7 @@ static int stm32_adc_probe(struct platform_device *pdev) if (ret < 0) goto err_clk_disable; - ret = stm32_adc_dma_request(indio_dev); + ret = stm32_adc_dma_request(dev, indio_dev); if (ret < 0) goto err_clk_disable; diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c index c4ec7779b394effbc1fbf7f90b819eddf54f7854..190a7c1c56046c78b37fc9b67756e809a1b1491e 100644 --- a/drivers/iio/dac/vf610_dac.c +++ b/drivers/iio/dac/vf610_dac.c @@ -235,6 +235,7 @@ static int vf610_dac_probe(struct platform_device *pdev) return 0; error_iio_device_register: + 
vf610_dac_exit(info); clk_disable_unprepare(info->clk); return ret; diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c index 6bb23a49e81eb8cf304a2f81f50c5e36a91e2d48..2f07c4d1398cd35a418eaa1067a5fc4b9a13d26b 100644 --- a/drivers/iio/health/afe4403.c +++ b/drivers/iio/health/afe4403.c @@ -71,6 +71,7 @@ static const struct reg_field afe4403_reg_fields[] = { * @regulator: Pointer to the regulator for the IC * @trig: IIO trigger for this device * @irq: ADC_RDY line interrupt number + * @buffer: Used to construct data layout to push into IIO buffer. */ struct afe4403_data { struct device *dev; @@ -80,6 +81,8 @@ struct afe4403_data { struct regulator *regulator; struct iio_trigger *trig; int irq; + /* Ensure suitable alignment for timestamp */ + s32 buffer[8] __aligned(8); }; enum afe4403_chan_id { @@ -318,7 +321,6 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) struct iio_dev *indio_dev = pf->indio_dev; struct afe4403_data *afe = iio_priv(indio_dev); int ret, bit, i = 0; - s32 buffer[8]; u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ}; u8 rx[3]; @@ -335,9 +337,9 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) if (ret) goto err; - buffer[i++] = (rx[0] << 16) | - (rx[1] << 8) | - (rx[2]); + afe->buffer[i++] = (rx[0] << 16) | + (rx[1] << 8) | + (rx[2]); } /* Disable reading from the device */ @@ -346,7 +348,8 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) if (ret) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer, + pf->timestamp); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c index 964f5231a831c437c277e4bcfe292720104043f7..5e256b11ac877862cd6e734b38068f793b4123e0 100644 --- a/drivers/iio/health/afe4404.c +++ b/drivers/iio/health/afe4404.c @@ -91,6 +91,7 @@ static const struct reg_field afe4404_reg_fields[] = { * @regulator: Pointer to the regulator for the IC * @trig: IIO trigger for this device * @irq: ADC_RDY line interrupt number + * @buffer: Used to construct a scan to push to the iio buffer. 
*/ struct afe4404_data { struct device *dev; @@ -99,6 +100,7 @@ struct afe4404_data { struct regulator *regulator; struct iio_trigger *trig; int irq; + s32 buffer[10] __aligned(8); }; enum afe4404_chan_id { @@ -337,17 +339,17 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private) struct iio_dev *indio_dev = pf->indio_dev; struct afe4404_data *afe = iio_priv(indio_dev); int ret, bit, i = 0; - s32 buffer[10]; for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) { ret = regmap_read(afe->regmap, afe4404_channel_values[bit], - &buffer[i++]); + &afe->buffer[i++]); if (ret) goto err; } - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer, + pf->timestamp); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index b470cb8132da6fd166642c567fd0b3a0259ca03c..273eb0612a5d3ad5986f0e56832eceb3798f9e0c 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -46,6 +46,11 @@ struct hdc100x_data { /* integration time of the sensor */ int adc_int_us[2]; + /* Ensure natural alignment of timestamp */ + struct { + __be16 channels[2]; + s64 ts __aligned(8); + } scan; }; /* integration time in us */ @@ -327,7 +332,6 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p) struct i2c_client *client = data->client; int delay = data->adc_int_us[0] + data->adc_int_us[1]; int ret; - s16 buf[8]; /* 2x s16 + padding + 8 byte timestamp */ /* dual read starts at temp register */ mutex_lock(&data->lock); @@ -338,13 +342,13 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p) } usleep_range(delay, delay + 1000); - ret = i2c_master_recv(client, (u8 *)buf, 4); + ret = i2c_master_recv(client, (u8 *)data->scan.channels, 4); if (ret < 0) { dev_err(&client->dev, "cannot read sensor data\n"); goto err; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); err: mutex_unlock(&data->lock); diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c index 360b6e98137acbfbd604e6492687f0b34f424de5..5a3a532937ba50c80f4da70ab5f31513a855af07 100644 --- a/drivers/iio/light/vcnl4000.c +++ b/drivers/iio/light/vcnl4000.c @@ -61,7 +61,6 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask, u8 rdy_mask, u8 data_reg, int *val) { int tries = 20; - __be16 buf; int ret; mutex_lock(&data->lock); @@ -88,13 +87,12 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask, goto fail; } - ret = i2c_smbus_read_i2c_block_data(data->client, - data_reg, sizeof(buf), (u8 *) &buf); + ret = i2c_smbus_read_word_swapped(data->client, data_reg); if (ret < 0) goto fail; mutex_unlock(&data->lock); - *val = be16_to_cpu(buf); + *val = ret; return 0; diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c index 283ecd4ea800d5ab7bdea4ae69bf6eefd854c9e7..11b9cc42bc09b10ea0e09ec2d8b9eeb6ce631504 100644 --- a/drivers/iio/magnetometer/ak8974.c +++ b/drivers/iio/magnetometer/ak8974.c @@ -184,6 +184,11 @@ struct ak8974 { bool drdy_irq; struct completion drdy_complete; bool drdy_active_low; + /* Ensure timestamp is naturally aligned */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; }; static const char ak8974_reg_avdd[] = "avdd"; @@ -580,7 +585,6 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev) { struct ak8974 *ak8974 = iio_priv(indio_dev); int ret; - __le16 
hw_values[8]; /* Three axes + 64bit padding */ pm_runtime_get_sync(&ak8974->i2c->dev); mutex_lock(&ak8974->lock); @@ -590,13 +594,13 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev) dev_err(&ak8974->i2c->dev, "error triggering measure\n"); goto out_unlock; } - ret = ak8974_getresult(ak8974, hw_values); + ret = ak8974_getresult(ak8974, ak8974->scan.channels); if (ret) { dev_err(&ak8974->i2c->dev, "error getting measures\n"); goto out_unlock; } - iio_push_to_buffers_with_timestamp(indio_dev, hw_values, + iio_push_to_buffers_with_timestamp(indio_dev, &ak8974->scan, iio_get_time_ns(indio_dev)); out_unlock: @@ -765,19 +769,21 @@ static int ak8974_probe(struct i2c_client *i2c, ak8974->map = devm_regmap_init_i2c(i2c, &ak8974_regmap_config); if (IS_ERR(ak8974->map)) { dev_err(&i2c->dev, "failed to allocate register map\n"); + pm_runtime_put_noidle(&i2c->dev); + pm_runtime_disable(&i2c->dev); return PTR_ERR(ak8974->map); } ret = ak8974_set_power(ak8974, AK8974_PWR_ON); if (ret) { dev_err(&i2c->dev, "could not power on\n"); - goto power_off; + goto disable_pm; } ret = ak8974_detect(ak8974); if (ret) { dev_err(&i2c->dev, "neither AK8974 nor AMI30x found\n"); - goto power_off; + goto disable_pm; } ret = ak8974_selftest(ak8974); @@ -787,14 +793,9 @@ static int ak8974_probe(struct i2c_client *i2c, ret = ak8974_reset(ak8974); if (ret) { dev_err(&i2c->dev, "AK8974 reset failed\n"); - goto power_off; + goto disable_pm; } - pm_runtime_set_autosuspend_delay(&i2c->dev, - AK8974_AUTOSUSPEND_DELAY); - pm_runtime_use_autosuspend(&i2c->dev); - pm_runtime_put(&i2c->dev); - indio_dev->dev.parent = &i2c->dev; indio_dev->channels = ak8974_channels; indio_dev->num_channels = ARRAY_SIZE(ak8974_channels); @@ -847,6 +848,11 @@ static int ak8974_probe(struct i2c_client *i2c, goto cleanup_buffer; } + pm_runtime_set_autosuspend_delay(&i2c->dev, + AK8974_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&i2c->dev); + pm_runtime_put(&i2c->dev); + return 0; cleanup_buffer: @@ -855,7 +861,6 @@ static int ak8974_probe(struct i2c_client *i2c, pm_runtime_put_noidle(&i2c->dev); pm_runtime_disable(&i2c->dev); ak8974_set_power(ak8974, AK8974_PWR_OFF); -power_off: regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs); return ret; diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 5f625ffa2a88d8b12d10491d85e920e519c8a97c..ae415b4e381afbcb906a025fb8e061cf6464ccc6 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -182,6 +182,8 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data, + (s32)2097152) * H2 + 8192) >> 14); var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)H1) >> 4; + var = clamp_val(var, 0, 419430400); + return var >> 12; }; @@ -651,7 +653,7 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas) unsigned int ctrl; if (data->use_eoc) - init_completion(&data->done); + reinit_completion(&data->done); ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas); if (ret) @@ -907,6 +909,9 @@ static int bmp085_fetch_eoc_irq(struct device *dev, "trying to enforce it\n"); irq_trig = IRQF_TRIGGER_RISING; } + + init_completion(&data->done); + ret = devm_request_threaded_irq(dev, irq, bmp085_eoc_irq, diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c index 2a77a2f157521c7383d666fd8b1ee4311cffb1db..b4b343c3bd8739df4a10167b58ef0b88c266f017 100644 --- a/drivers/iio/pressure/ms5611_core.c +++ b/drivers/iio/pressure/ms5611_core.c @@ -215,16 +215,21 @@ static irqreturn_t 
ms5611_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ms5611_state *st = iio_priv(indio_dev); - s32 buf[4]; /* s32 (pressure) + s32 (temp) + 2 * s32 (timestamp) */ + /* Ensure buffer elements are naturally aligned */ + struct { + s32 channels[2]; + s64 ts __aligned(8); + } scan; int ret; mutex_lock(&st->lock); - ret = ms5611_read_temp_and_pressure(indio_dev, &buf[1], &buf[0]); + ret = ms5611_read_temp_and_pressure(indio_dev, &scan.channels[1], + &scan.channels[0]); mutex_unlock(&st->lock); if (ret < 0) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev)); err: diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c index 91431454eb85de1c51cf88e477ab3d8378e4d007..7ffc4a6d80cd58d36f9f9d7824c46eafd5379823 100644 --- a/drivers/iio/pressure/zpa2326.c +++ b/drivers/iio/pressure/zpa2326.c @@ -672,8 +672,10 @@ static int zpa2326_resume(const struct iio_dev *indio_dev) int err; err = pm_runtime_get_sync(indio_dev->dev.parent); - if (err < 0) + if (err < 0) { + pm_runtime_put(indio_dev->dev.parent); return err; + } if (err > 0) { /* diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index c2bbe0df0931c70dea994caded347ab6dc7863a3..c0c180fc6fb23135f7ecf7b1bafa6df848e67cd7 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -450,16 +450,15 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, struct flowi6 fl6; struct dst_entry *dst; struct rt6_info *rt; - int ret; memset(&fl6, 0, sizeof fl6); fl6.daddr = dst_in->sin6_addr; fl6.saddr = src_in->sin6_addr; fl6.flowi6_oif = addr->bound_dev_if; - ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6); - if (ret < 0) - return ret; + dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL); + if (IS_ERR(dst)) + return PTR_ERR(dst); rt = (struct rt6_info *)dst; if (ipv6_addr_any(&src_in->sin6_addr)) { diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index d901591db9c8eb62d9f5630d567d8437f3a33975..6e8af2b914929fffe63a55d533b12d222ad4e6e6 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1482,6 +1482,8 @@ static struct rdma_id_private *cma_find_listener( { struct rdma_id_private *id_priv, *id_priv_dev; + lockdep_assert_held(&lock); + if (!bind_list) return ERR_PTR(-EINVAL); @@ -1530,6 +1532,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, } } + mutex_lock(&lock); /* * Net namespace might be getting deleted while route lookup, * cm_id lookup is in progress. 
Therefore, perform netdevice @@ -1571,6 +1574,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); err: rcu_read_unlock(); + mutex_unlock(&lock); if (IS_ERR(id_priv) && *net_dev) { dev_put(*net_dev); *net_dev = NULL; @@ -2287,6 +2291,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, struct net *net = id_priv->id.route.addr.dev_addr.net; int ret; + lockdep_assert_held(&lock); + if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) return; @@ -2993,6 +2999,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list, u64 sid, mask; __be16 port; + lockdep_assert_held(&lock); + addr = cma_src_addr(id_priv); port = htons(bind_list->port); @@ -3021,6 +3029,8 @@ static int cma_alloc_port(enum rdma_port_space ps, struct rdma_bind_list *bind_list; int ret; + lockdep_assert_held(&lock); + bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); if (!bind_list) return -ENOMEM; @@ -3047,6 +3057,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list, struct sockaddr *saddr = cma_src_addr(id_priv); __be16 dport = cma_port(daddr); + lockdep_assert_held(&lock); + hlist_for_each_entry(cur_id, &bind_list->owners, node) { struct sockaddr *cur_daddr = cma_dst_addr(cur_id); struct sockaddr *cur_saddr = cma_src_addr(cur_id); @@ -3086,6 +3098,8 @@ static int cma_alloc_any_port(enum rdma_port_space ps, unsigned int rover; struct net *net = id_priv->id.route.addr.dev_addr.net; + lockdep_assert_held(&lock); + inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rover = prandom_u32() % remaining + low; @@ -3133,6 +3147,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list, struct rdma_id_private *cur_id; struct sockaddr *addr, *cur_addr; + lockdep_assert_held(&lock); + addr = cma_src_addr(id_priv); hlist_for_each_entry(cur_id, &bind_list->owners, node) { if (id_priv == cur_id) @@ -3163,6 +3179,8 @@ static int cma_use_port(enum rdma_port_space ps, unsigned short snum; int ret; + lockdep_assert_held(&lock); + snum = ntohs(cma_port(cma_src_addr(id_priv))); if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c index 54076a3e800767fc659420a91f61a84e566f63c6..ac47e8a1dfbf4fb6ecb57b32950d3d0d8fa443b5 100644 --- a/drivers/infiniband/core/cma_configfs.c +++ b/drivers/infiniband/core/cma_configfs.c @@ -319,8 +319,21 @@ static struct config_group *make_cma_dev(struct config_group *group, return ERR_PTR(err); } +static void drop_cma_dev(struct config_group *cgroup, struct config_item *item) +{ + struct config_group *group = + container_of(item, struct config_group, cg_item); + struct cma_dev_group *cma_dev_group = + container_of(group, struct cma_dev_group, device_group); + + configfs_remove_default_groups(&cma_dev_group->ports_group); + configfs_remove_default_groups(&cma_dev_group->device_group); + config_item_put(item); +} + static struct configfs_group_operations cma_subsys_group_ops = { .make_group = make_cma_dev, + .drop_item = drop_cma_dev, }; static struct config_item_type cma_subsys_type = { diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 6072ac7023cb75c2a6bc69f310d9e5f3f94a2a08..08d2e9cc28eb4bc51e20226c763b47a0fd40af3a 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -2907,6 +2907,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, DMA_FROM_DEVICE); 
if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr))) { + kfree(mad_priv); ret = -ENOMEM; break; } diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 57b41125b1462e4c9e9542dbac06c0e937434ac0..688ce1846911bbadfe1868176462744da7d52bdf 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -306,7 +306,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem, vma = find_vma(mm, ib_umem_start(umem)); if (!vma || !is_vm_hugetlb_page(vma)) { up_read(&mm->mmap_sem); - return -EINVAL; + ret_val = -EINVAL; + goto out_mm; } h = hstate_vma(vma); umem->page_shift = huge_page_shift(h); diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c index 476867a3f584f33e1b7f36b0e22b5cd07605eac0..4ded9411fb3254587ce4db6acbe32b348c9bb9ca 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_hw.c +++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c @@ -483,7 +483,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev, int arp_index; arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action); - if (arp_index == -1) + if (arp_index < 0) return; cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false); if (!cqp_request) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index e10c3d915e389156fb26debfbd33cbec7c6cd86b..df1ecd29057f8d3abfa6f08f3dbeb13d75662daa 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -2917,6 +2917,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, int send_size; int header_size; int spc; + int err; int i; if (wr->wr.opcode != IB_WR_SEND) @@ -2951,7 +2952,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, sqp->ud_header.lrh.virtual_lane = 0; sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); - ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); + if (err) + return err; sqp->ud_header.bth.pkey = cpu_to_be16(pkey); if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); @@ -3240,9 +3243,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, } sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); if (!sqp->qp.ibqp.qp_num) - ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, + &pkey); else - ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey); + err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, + &pkey); + if (err) + return err; + sqp->ud_header.bth.pkey = cpu_to_be16(pkey); sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index d831f3e61ae8ff6b15f5c734098a702746f1f34f..2626205780ee4578e16eda3cbf010c28f4ce58a2 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -756,7 +756,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping linkcontrol sysfs info, (err %d) port %u\n", ret, port_num); - goto bail; + goto bail_link; } kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); @@ -766,7 +766,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping sl2vl sysfs info, 
(err %d) port %u\n", ret, port_num); - goto bail_link; + goto bail_sl; } kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); @@ -776,7 +776,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping diag_counters sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_sl; + goto bail_diagc; } kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); @@ -789,7 +789,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num, qib_dev_err(dd, "Skipping Congestion Control sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_diagc; + goto bail_cc; } kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); @@ -871,6 +871,7 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd) &cc_table_bin_attr); kobject_put(&ppd->pport_cc_kobj); } + kobject_put(&ppd->diagc_kobj); kobject_put(&ppd->sl2vl_kobj); kobject_put(&ppd->pport_kobj); } diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index d549c9ffadcbb558598ff1fa01e82df0dbce7a3a..867303235f57a3ed41ed5cbfe7a3a2f3ca408f5b 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -774,7 +774,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "PCI BAR region not MMIO\n"); ret = -ENOMEM; - goto err_free_device; + goto err_disable_pdev; } ret = pci_request_regions(pdev, DRV_NAME); diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 59dee10bebcbd96f701c572e9a9d8331add13694..24a68a9da8be50aec184fc820063c57d0ab7d1e1 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -154,10 +154,12 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev, memcpy(&fl6.daddr, daddr, sizeof(*daddr)); fl6.flowi6_proto = IPPROTO_UDP; - if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk), - recv_sockets.sk6->sk, &ndst, &fl6))) { + ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk), + recv_sockets.sk6->sk, &fl6, + NULL); + if (unlikely(IS_ERR(ndst))) { pr_err_ratelimited("no route to %pI6\n", daddr); - goto put; + return NULL; } if (unlikely(ndst->error)) { diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 92557147500560212f118d31258feb810e636cf3..2463d02e3f991e2fc1778ffddf94e81c60dec27f 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -342,20 +342,6 @@ static int evdev_fasync(int fd, struct file *file, int on) return fasync_helper(fd, file, on, &client->fasync); } -static int evdev_flush(struct file *file, fl_owner_t id) -{ - struct evdev_client *client = file->private_data; - struct evdev *evdev = client->evdev; - - mutex_lock(&evdev->mutex); - - if (evdev->exist && !client->revoked) - input_flush_device(&evdev->handle, file); - - mutex_unlock(&evdev->mutex); - return 0; -} - static void evdev_free(struct device *dev) { struct evdev *evdev = container_of(dev, struct evdev, dev); @@ -469,6 +455,10 @@ static int evdev_release(struct inode *inode, struct file *file) unsigned int i; mutex_lock(&evdev->mutex); + + if (evdev->exist && !client->revoked) + input_flush_device(&evdev->handle, file); + evdev_ungrab(evdev, client); mutex_unlock(&evdev->mutex); @@ -1331,7 +1321,6 @@ static const struct file_operations evdev_fops = { .compat_ioctl = evdev_ioctl_compat, #endif .fasync = evdev_fasync, - .flush = evdev_flush, .llseek = no_llseek, }; diff --git a/drivers/input/joystick/xpad.c 
b/drivers/input/joystick/xpad.c index 26476a64e66396f117921bfda743bc87fc4dbcbb..54a6691d7d878fefa519f5d803dafed4b129702e 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -475,6 +475,16 @@ static const u8 xboxone_fw2015_init[] = { 0x05, 0x20, 0x00, 0x01, 0x00 }; +/* + * This packet is required for Xbox One S (0x045e:0x02ea) + * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to + * initialize the controller that was previously used in + * Bluetooth mode. + */ +static const u8 xboxone_s_init[] = { + 0x05, 0x20, 0x00, 0x0f, 0x06 +}; + /* * This packet is required for the Titanfall 2 Xbox One pads * (0x0e6f:0x0165) to finish initialization and for Hori pads @@ -533,6 +543,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), + XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init), + XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), diff --git a/drivers/input/keyboard/dlink-dir685-touchkeys.c b/drivers/input/keyboard/dlink-dir685-touchkeys.c index 88e321b763975b9d97676dddc8b5eed536d36064..6fe4062e3ac2cdd8daf9deb4a7ed436a1cba4760 100644 --- a/drivers/input/keyboard/dlink-dir685-touchkeys.c +++ b/drivers/input/keyboard/dlink-dir685-touchkeys.c @@ -142,7 +142,7 @@ MODULE_DEVICE_TABLE(of, dir685_tk_of_match); static struct i2c_driver dir685_tk_i2c_driver = { .driver = { - .name = "dlin-dir685-touchkeys", + .name = "dlink-dir685-touchkeys", .of_match_table = of_match_ptr(dir685_tk_of_match), }, .probe = dir685_tk_probe, diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 2bca84f4c2b2ea08e9bffa14215598d12745406b..0714d572e49a33ddb97546e90eca31a9c5dfcea3 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -173,6 +173,7 @@ static const char * const smbus_pnp_ids[] = { "LEN005b", /* P50 */ "LEN005e", /* T560 */ "LEN006c", /* T470s */ + "LEN007a", /* T470s */ "LEN0071", /* T480 */ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ "LEN0073", /* X1 Carbon G5 (Elantech) */ @@ -181,6 +182,7 @@ static const char * const smbus_pnp_ids[] = { "LEN0093", /* T480 */ "LEN0096", /* X280 */ "LEN0097", /* X280 -> ALPS trackpoint */ + "LEN0099", /* X1 Extreme 1st */ "LEN009b", /* T580 */ "LEN200f", /* T450s */ "LEN2044", /* L470 */ diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index 997ccae7ee0557c3ff60e9cad15ca7f08cbb70f8..30a8d816c45c947204da370053a0711b7f247bc8 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -232,7 +232,7 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id) if (count) { kfree(attn_data.data); - attn_data.data = NULL; + drvdata->attn_data.data = NULL; } if (!kfifo_is_empty(&drvdata->attn_fifo)) @@ -1220,7 +1220,8 @@ static int rmi_driver_probe(struct device *dev) if (data->input) { rmi_driver_set_input_name(rmi_dev, data->input); if (!rmi_dev->xport->input) { - if (input_register_device(data->input)) { + retval = input_register_device(data->input); + if (retval) { dev_err(dev, "%s: Failed to register input device.\n", __func__); goto err_destroy_functions; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 
0d0f977a2f393dcabc09e01c5178e2f2117354ca..8134c7f928165996a4d3096da8ec251063fd6894 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -429,6 +429,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "076804U"), }, }, + { + /* Lenovo XiaoXin Air 12 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "80UN"), + }, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), @@ -666,6 +673,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), }, }, + { + /* Lenovo ThinkPad Twist S230u */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), + }, + }, { } }; diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index 2c41107240dec274e5ec34724a882d87189f0103..499402a975b3ce07a9abbad7b9813b16cddbdf0e 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -197,6 +197,7 @@ static const struct usb_device_id usbtouch_devices[] = { #endif #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH + {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH}, {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES}, diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 778f167be2d3517656799bd609f84d53765457a3..494caaa265af0b5dd6624ffa916f97db733239c7 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -4394,9 +4394,10 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu) if (!fn) return -ENOMEM; iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu); - irq_domain_free_fwnode(fn); - if (!iommu->ir_domain) + if (!iommu->ir_domain) { + irq_domain_free_fwnode(fn); return -ENOMEM; + } iommu->ir_domain->parent = arch_get_ir_parent_domain(); iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 6c228144b3da528564d78aa51cbe77554bb9e36d..ec9a20e069417e0356ccec5e69f1279484d33a75 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1317,8 +1317,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, } case IVHD_DEV_ACPI_HID: { u16 devid; - u8 hid[ACPIHID_HID_LEN] = {0}; - u8 uid[ACPIHID_UID_LEN] = {0}; + u8 hid[ACPIHID_HID_LEN]; + u8 uid[ACPIHID_UID_LEN]; int ret; if (h->type != 0x40) { @@ -1335,6 +1335,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; } + uid[0] = '\0'; switch (e->uidf) { case UID_NOT_PRESENT: @@ -1349,8 +1350,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; case UID_IS_CHARACTER: - memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1); - uid[ACPIHID_UID_LEN - 1] = '\0'; + memcpy(uid, &e->uid, e->uidl); + uid[e->uidl] = '\0'; break; default: diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 25842b566c39c1497d1cecd91b6af8a079492185..154949a499c2177a1c7debea8fca711a126f692a 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -536,8 +536,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) 0, INTR_REMAP_TABLE_ENTRIES, fn, &intel_ir_domain_ops, iommu); - irq_domain_free_fwnode(fn); if 
(!iommu->ir_domain) { + irq_domain_free_fwnode(fn); pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); goto out_free_bitmap; } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 309579a375fb9ac8569011f1210ec41ad12ce7ff..77de52c894747d570f306a023ae7aa9d7e3f8d5a 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -385,7 +385,7 @@ struct iommu_group *iommu_group_alloc(void) NULL, "%d", group->id); if (ret) { ida_simple_remove(&iommu_group_ida, group->id); - kfree(group); + kobject_put(&group->kobj); return ERR_PTR(ret); } diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index e4020bb8cd2d639c9c7dbb5b594d6942f2628916..43ebc5026ebfbd1da754827b1a5580796737cbe7 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -449,10 +449,8 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); - unsigned int cpu, shift = (gic_irq(d) % 4) * 8; - u32 val, mask, bit; - unsigned long flags; + void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d); + unsigned int cpu; if (!force) cpu = cpumask_any_and(mask_val, cpu_online_mask); @@ -462,13 +460,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) return -EINVAL; - gic_lock_irqsave(flags); - mask = 0xff << shift; - bit = gic_cpu_map[cpu] << shift; - val = readl_relaxed(reg) & ~mask; - writel_relaxed(val | bit, reg); - gic_unlock_irqrestore(flags); - + writeb_relaxed(gic_cpu_map[cpu], reg); irq_data_update_effective_affinity(d, cpumask_of(cpu)); return IRQ_SET_MASK_OK_DONE; diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c index 96d16fca68b2469f5a73c57369deaac3ed04e8ec..088ca17a843d3db52b18682db75d32b7a2da1bdf 100644 --- a/drivers/macintosh/windfarm_pm112.c +++ b/drivers/macintosh/windfarm_pm112.c @@ -133,14 +133,6 @@ static int create_cpu_loop(int cpu) s32 tmax; int fmin; - /* Get PID params from the appropriate SAT */ - hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); - if (hdr == NULL) { - printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); - return -EINVAL; - } - piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; - /* Get FVT params to get Tmax; if not found, assume default */ hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL); if (hdr) { @@ -153,6 +145,16 @@ static int create_cpu_loop(int cpu) if (tmax < cpu_all_tmax) cpu_all_tmax = tmax; + kfree(hdr); + + /* Get PID params from the appropriate SAT */ + hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); + if (hdr == NULL) { + printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); + return -EINVAL; + } + piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; + /* * Darwin has a minimum fan speed of 1000 rpm for the 4-way and * 515 for the 2-way. 
That appears to be overkill, so for now, @@ -175,6 +177,9 @@ static int create_cpu_loop(int cpu) pid.min = fmin; wf_cpu_pid_init(&cpu_pid[cpu], &pid); + + kfree(hdr); + return 0; } diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 96a6583e7b522c826dace766d6922cc2494ed1ec..66c764491a8306dece1a4240d1f2841b4b2623a9 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1374,7 +1374,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, if (__set_blocks(n1, n1->keys + n2->keys, block_bytes(b->c)) > btree_blocks(new_nodes[i])) - goto out_nocoalesce; + goto out_unlock_nocoalesce; keys = n2->keys; /* Take the key of the node we're getting rid of */ @@ -1403,7 +1403,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, if (__bch_keylist_realloc(&keylist, bkey_u64s(&new_nodes[i]->key))) - goto out_nocoalesce; + goto out_unlock_nocoalesce; bch_btree_node_write(new_nodes[i], &cl); bch_keylist_add(&keylist, &new_nodes[i]->key); @@ -1449,6 +1449,10 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, /* Invalidated our iterator */ return -EINTR; +out_unlock_nocoalesce: + for (i = 0; i < nodes; i++) + mutex_unlock(&new_nodes[i]->write_lock); + out_nocoalesce: closure_sync(&cl); bch_keylist_free(&keylist); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index cb959a0e711d92979cfedacc619c396690ed9502..651a8359550d4c884fd30d4490005b2c897cea90 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -2892,7 +2892,8 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) * - for REQ_OP_DISCARD caller must use flush if IO ordering matters */ if (unlikely(bio->bi_opf & REQ_PREFLUSH || - bio_op(bio) == REQ_OP_DISCARD)) { + bio_op(bio) == REQ_OP_DISCARD || + bio_should_skip_dm_default_key(bio))) { bio_set_dev(bio, cc->dev->bdev); if (bio_sectors(bio)) bio->bi_iter.bi_sector = cc->start + @@ -3098,7 +3099,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) limits->max_segment_size = PAGE_SIZE; limits->logical_block_size = - max_t(unsigned short, limits->logical_block_size, cc->sector_size); + max_t(unsigned, limits->logical_block_size, cc->sector_size); limits->physical_block_size = max_t(unsigned, limits->physical_block_size, cc->sector_size); limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size); diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index dba7543a1bd5bd77a5d4b8c2e3022f08748eca3d..277a21d74830a462d16828c6e8f3b40f7ff338ce 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -52,6 +52,7 @@ struct default_key_c { struct blk_crypto_key key; bool is_hw_wrapped; u64 max_dun; + bool set_dun; }; static const struct dm_default_key_cipher * @@ -122,6 +123,8 @@ static int default_key_ctr_optional(struct dm_target *ti, iv_large_sectors = true; } else if (!strcmp(opt_string, "wrappedkey_v0")) { dkc->is_hw_wrapped = true; + } else if (!strcmp(opt_string, "set_dun")) { + dkc->set_dun = true; } else { ti->error = "Invalid feature arguments"; return -EINVAL; @@ -160,7 +163,9 @@ static void default_key_adjust_sector_size_and_iv(char **argv, memcpy(raw, key_new.bytes, size); - if (ti->len & (((*dkc)->sector_size >> SECTOR_SHIFT) - 1)) + if ((ti->len & (((*dkc)->sector_size >> SECTOR_SHIFT) - 1)) || + ((*dkc)->dev->bdev->bd_disk->disk_name[0] && + !strcmp((*dkc)->dev->bdev->bd_disk->disk_name, "mmcblk0"))) (*dkc)->sector_size = SECTOR_SIZE; if (dev->bdev->bd_part) diff --git a/drivers/md/dm-mpath.c 
b/drivers/md/dm-mpath.c index 8b7328666eaafc89384afa198a41da94df429a25..7c60aace8d25d86e26bdb02a2eae71cc7a934664 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1815,7 +1815,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, int r; current_pgpath = READ_ONCE(m->current_pgpath); - if (!current_pgpath) + if (!current_pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) current_pgpath = choose_pgpath(m, 0); if (current_pgpath) { diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 4d658a0c602587358caca34268f04eb990754227..c6d3a4bc811ca4aebec8b47f30d634d4d00a0c98 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -1580,7 +1580,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd) return dzone; } - return ERR_PTR(-EBUSY); + return NULL; } /* @@ -1600,7 +1600,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) return zone; } - return ERR_PTR(-EBUSY); + return NULL; } /* diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c index 2fad512dce98f7d2dff94e6165b52f29a7bdf76a..1015b200330b8710e85334a78819e1e697d29274 100644 --- a/drivers/md/dm-zoned-reclaim.c +++ b/drivers/md/dm-zoned-reclaim.c @@ -350,8 +350,8 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc) /* Get a data zone */ dzone = dmz_get_zone_for_reclaim(zmd); - if (IS_ERR(dzone)) - return PTR_ERR(dzone); + if (!dzone) + return -EBUSY; start = jiffies; diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 497a2bc5da51f6d3ef9af19d065ac871b331420d..9d8d453b2735ff78254af16396ff3a4f29c3a720 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -789,7 +789,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) } /* Set target (no write same support) */ - ti->max_io_len = dev->zone_nr_sectors << 9; + ti->max_io_len = dev->zone_nr_sectors; ti->num_flush_bios = 1; ti->num_discard_bios = 1; ti->num_write_zeroes_bios = 1; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index bb78417a249bb0c8cedb1a76db2091835626de1d..1c621a8a788655e7f97dd25bec14a37ad66fa617 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/blkpg.h> #include <linux/bio.h> @@ -2837,17 +2838,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast); int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, unsigned cookie) { + int r; + unsigned noio_flag; char udev_cookie[DM_COOKIE_LENGTH]; char *envp[] = { udev_cookie, NULL }; + noio_flag = memalloc_noio_save(); + if (!cookie) - return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); + r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); else { snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", DM_COOKIE_ENV_VAR_NAME, cookie); - return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, - action, envp); + r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, + action, envp); } + + memalloc_noio_restore(noio_flag); + + return r; } uint32_t dm_next_uevent_seq(struct mapped_device *md) diff --git a/drivers/md/md.c b/drivers/md/md.c index b2075f9609edbcd7ea771665d13caf8bbf3be459..223e1f5e5e23b61863fca6899bc320bae0f4036d 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1168,6 +1168,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) mddev->new_layout = mddev->layout; mddev->new_chunk_sectors = mddev->chunk_sectors; } + if (mddev->level == 0) + mddev->layout = -1; if (sb->state & (1<<MD_SB_CLEAN)) mddev->recovery_cp = MaxSector; 
@@ -1584,6 +1586,10 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset; } + if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) && + sb->level != 0) + return -EINVAL; + if (!refdev) { ret = 1; } else { @@ -1694,6 +1700,10 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) mddev->new_chunk_sectors = mddev->chunk_sectors; } + if (mddev->level == 0 && + !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT)) + mddev->layout = -1; + if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) set_bit(MD_HAS_JOURNAL, &mddev->flags); @@ -6757,6 +6767,9 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) mddev->external = 0; mddev->layout = info->layout; + if (mddev->level == 0) + /* Cannot trust RAID0 layout info here */ + mddev->layout = -1; mddev->chunk_sectors = info->chunk_size >> 9; if (mddev->persistent) { @@ -7411,7 +7424,8 @@ static int md_open(struct block_device *bdev, fmode_t mode) */ mddev_put(mddev); /* Wait until bdev->bd_disk is definitely gone */ - flush_workqueue(md_misc_wq); + if (work_pending(&mddev->del_work)) + flush_workqueue(md_misc_wq); /* Then retry the open from the top */ return -ERESTARTSYS; } diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index cdafa5e0ea6dbf772e2daa43079a6d8d767d9a15..e179c121c03003a9aa4fbc1c8b423326a886b3a9 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -152,6 +152,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) if (conf->nr_strip_zones == 1) { conf->layout = RAID0_ORIG_LAYOUT; + } else if (mddev->layout == RAID0_ORIG_LAYOUT || + mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) { + conf->layout = mddev->layout; } else if (default_layout == RAID0_ORIG_LAYOUT || default_layout == RAID0_ALT_MULTIZONE_LAYOUT) { conf->layout = default_layout; diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index 0d7d687aeea0ec23b0deaa2269573fcc6d36346b..061b7824f698fcf4cb60f808a1c3f1348ff63439 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -1624,6 +1624,10 @@ int __cec_s_log_addrs(struct cec_adapter *adap, unsigned j; log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID; + if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) { + dprintk(1, "unknown logical address type\n"); + return -EINVAL; + } if (type_mask & (1 << log_addrs->log_addr_type[i])) { dprintk(1, "duplicate logical address type\n"); return -EINVAL; @@ -1644,10 +1648,6 @@ int __cec_s_log_addrs(struct cec_adapter *adap, dprintk(1, "invalid primary device type\n"); return -EINVAL; } - if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) { - dprintk(1, "unknown logical address type\n"); - return -EINVAL; - } for (j = 0; j < feature_sz; j++) { if ((features[j] & 0x80) == 0) { if (op_is_dev_features) diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index 69f564b0837a7f66e46f8fd60764c4d8dbc86bf6..eb0331b8a5833b5daf1ea095a0ae70de49a961be 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -2298,8 +2298,8 @@ static int ov5640_probe(struct i2c_client *client, free_ctrls: v4l2_ctrl_handler_free(&sensor->ctrls.handler); entity_cleanup: - mutex_destroy(&sensor->lock); media_entity_cleanup(&sensor->sd.entity); + mutex_destroy(&sensor->lock); return ret; } @@ -2309,9 +2309,9 @@ static int ov5640_remove(struct i2c_client *client) struct ov5640_dev *sensor = to_ov5640_dev(sd); v4l2_async_unregister_subdev(&sensor->sd); 
- mutex_destroy(&sensor->lock); media_entity_cleanup(&sensor->sd.entity); v4l2_ctrl_handler_free(&sensor->ctrls.handler); + mutex_destroy(&sensor->lock); return 0; } diff --git a/drivers/media/platform/msm/ais/ais_isp/utils/ais_isp_trace.c b/drivers/media/platform/msm/ais/ais_isp/utils/ais_isp_trace.c index 659acf4b11865d91767b43cbb1f2167cb2485852..59590995762febfaccf8aefc0487fb7c59bd004c 100644 --- a/drivers/media/platform/msm/ais/ais_isp/utils/ais_isp_trace.c +++ b/drivers/media/platform/msm/ais/ais_isp/utils/ais_isp_trace.c @@ -13,4 +13,26 @@ /* Instantiate tracepoints */ #define CREATE_TRACE_POINTS + +#include +#include + #include "ais_isp_trace.h" + +static uint debug_trace; +module_param(debug_trace, uint, 0644); + +void ais_trace_print(char c, int value, const char *fmt, ...) +{ + if (debug_trace) { + char str_buffer[256]; + va_list args; + + va_start(args, fmt); + vsnprintf(str_buffer, 256, fmt, args); + + trace_ais_tracing_mark_write(c, current, str_buffer, value); + va_end(args); + } +} + diff --git a/drivers/media/platform/msm/ais/ais_isp/utils/ais_isp_trace.h b/drivers/media/platform/msm/ais/ais_isp/utils/ais_isp_trace.h index e2ff0184306ea1df21f4eb8df0b44f5d7b71102f..00f2116124b3b5a6c97276b8bdd00e1b14d622ee 100644 --- a/drivers/media/platform/msm/ais/ais_isp/utils/ais_isp_trace.h +++ b/drivers/media/platform/msm/ais/ais_isp/utils/ais_isp_trace.h @@ -223,6 +223,36 @@ TRACE_EVENT(ais_isp_vfe_error, ) ); +TRACE_EVENT(ais_tracing_mark_write, + TP_PROTO(char trace_type, const struct task_struct *task, + const char *name, int value), + TP_ARGS(trace_type, task, name, value), + TP_STRUCT__entry( + __field(char, trace_type) + __field(int, pid) + __string(trace_name, name) + __field(int, value) + ), + TP_fast_assign( + __entry->trace_type = trace_type; + __entry->pid = task ? task->tgid : 0; + __assign_str(trace_name, name); + __entry->value = value; + ), + TP_printk("%c|%d|%s|%d", __entry->trace_type, + __entry->pid, __get_str(trace_name), __entry->value) +); + +void ais_trace_print(char c, int value, const char *fmt, ...); + +#define AIS_ATRACE_BEGIN(fmt, args...) ais_trace_print('B', 0, fmt, ##args) + +#define AIS_ATRACE_END(fmt, args...) ais_trace_print('E', 0, fmt, ##args) + +#define AIS_ATRACE_INT(value, fmt, args...) 
\ + ais_trace_print('C', value, fmt, ##args) + + #endif /* _AIS_ISP_TRACE_H */ /* This part must be outside protection */ diff --git a/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.c b/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.c index ba0c2b9e13888b525505864f38498a9096bd0113..60249b88868e762b13698784957cb034d89ca1c1 100644 --- a/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.c +++ b/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.c @@ -1083,7 +1083,11 @@ static int ais_vfe_handle_sof( if (p_rdi->state != AIS_ISP_RESOURCE_STATE_STREAMING) continue; + AIS_ATRACE_BEGIN("SOF_%u_%u_%lu", + core_info->vfe_idx, path, p_rdi->frame_cnt); ais_vfe_handle_sof_rdi(core_info, work_data, path); + AIS_ATRACE_END("SOF_%u_%u_%lu", + core_info->vfe_idx, path, p_rdi->frame_cnt); //enq buffers spin_lock_bh(&p_rdi->buffer_lock); @@ -1344,8 +1348,12 @@ static int ais_vfe_bus_handle_frame_done( if (client_mask & (0x1 << client)) { //process frame done + AIS_ATRACE_BEGIN("FD_%u_%u_%lu", + core_info->vfe_idx, client, p_rdi->frame_cnt); ais_vfe_bus_handle_client_frame_done(core_info, client, work_data->last_addr[client]); + AIS_ATRACE_END("FD_%u_%u_%lu", + core_info->vfe_idx, client, p_rdi->frame_cnt); } } @@ -1417,8 +1425,11 @@ static int ais_vfe_handle_bus_wr_irq(struct cam_hw_info *vfe_hw, work_data->bus_wr_status[1], work_data->bus_wr_status[2]); - if (work_data->bus_wr_status[1]) + if (work_data->bus_wr_status[1]) { + AIS_ATRACE_BEGIN("FD_%d", core_info->vfe_idx); ais_vfe_bus_handle_frame_done(core_info, work_data); + AIS_ATRACE_END("FD_%d", core_info->vfe_idx); + } if (work_data->bus_wr_status[0] & 0x7800) { CAM_ERR(CAM_ISP, "VFE%d: WR BUS error occurred status = 0x%x", @@ -1467,7 +1478,9 @@ static int ais_vfe_process_irq_bh(void *priv, void *data) switch (work_data->evt_type) { case AIS_VFE_HW_IRQ_EVENT_SOF: + AIS_ATRACE_BEGIN("SOF_%d", core_info->vfe_idx); rc = ais_vfe_handle_sof(core_info, work_data); + AIS_ATRACE_END("SOF_%d", core_info->vfe_idx); break; case AIS_VFE_HW_IRQ_EVENT_BUS_WR: rc = ais_vfe_handle_bus_wr_irq(vfe_hw, core_info, work_data); diff --git a/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.h b/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.h index ad097ce4bd0fcd090686e7e037a8f824a81e1ff2..747f422a113de64e24585ee3b375da794953747d 100644 --- a/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.h +++ b/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_core.h @@ -21,7 +21,7 @@ #include "ais_vfe_top_ver2.h" #define AIS_VFE_WORKQ_NUM_TASK 20 -#define AIS_VFE_MAX_BUF 12 +#define AIS_VFE_MAX_BUF 20 #define AIS_VFE_MAX_SOF_INFO 8 enum ais_vfe_hw_irq_event { diff --git a/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_soc.c b/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_soc.c index 85155e420984804d0ee7780a07cb7abdd6ab4128..3d93340c0f860aa37520e7093bbfc7af56b2ef98 100644 --- a/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_soc.c +++ b/drivers/media/platform/msm/ais/ais_isp/vfe_hw/ais_vfe_soc.c @@ -197,11 +197,12 @@ int ais_vfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info) if (rc) CAM_ERR(CAM_ISP, "CPAS0 unregistration failed rc=%d", rc); - if (!rc && soc_private->cpas_version == CAM_CPAS_TITAN_175_V120) + if (!rc && soc_private->cpas_version == CAM_CPAS_TITAN_175_V120) { rc = cam_cpas_unregister_client(soc_private->cpas_handle[1]); if (rc) CAM_ERR(CAM_ISP, "CPAS1 unregistration failed rc=%d", rc); + } rc = ais_vfe_release_platform_resource(soc_info); if (rc < 0) diff --git 
a/drivers/media/platform/msm/ais/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/ais/cam_isp/cam_isp_context.c index 6cfbf7b89a307234c40c583f752ece892905f59a..e5a52c96fd691453f90a496ce7432d7cae60e272 100644 --- a/drivers/media/platform/msm/ais/cam_isp/cam_isp_context.c +++ b/drivers/media/platform/msm/ais/cam_isp/cam_isp_context.c @@ -2185,7 +2185,7 @@ static int __cam_isp_ctx_dump_req_info(struct cam_context *ctx, uint8_t *dst; if (!req || !ctx || !offset || !cpu_addr || !buf_len) { - CAM_ERR(CAM_ISP, "Invalid parameters %pK %pK %u %pK %pK %pK ", + CAM_ERR(CAM_ISP, "Invalid parameters %pK %pK %pK %lx %zu ", req, ctx, offset, cpu_addr, buf_len); return -EINVAL; } diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c index 52401195991d2b358feac59c0f5ecfd4905d9ac5..724cf8785df25413a87bd511f3375a56bc46301a 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1291,6 +1291,10 @@ void msm_vfe47_cfg_fetch_engine(struct vfe_device *vfe_dev, case V4L2_PIX_FMT_P16GBRG10: case V4L2_PIX_FMT_P16GRBG10: case V4L2_PIX_FMT_P16RGGB10: + case V4L2_PIX_FMT_P16BGGR12: + case V4L2_PIX_FMT_P16GBRG12: + case V4L2_PIX_FMT_P16GRBG12: + case V4L2_PIX_FMT_P16RGGB12: main_unpack_pattern = 0xB210; break; default: diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c index 7e49dcf39e367dfb2b381cbf362571668f21cd5d..3a95273694cb7b72c08afe81273cee529795e590 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1021,6 +1021,8 @@ void msm_isp_increment_frame_id(struct vfe_device *vfe_dev, ISP_EVENT_REG_UPDATE_MISSING); } } + vfe_dev->isp_page->kernel_sofid = + vfe_dev->axi_data.src_info[frame_src].frame_id; } static void msm_isp_update_pd_stats_idx(struct vfe_device *vfe_dev, @@ -2569,7 +2571,8 @@ static void msm_isp_input_enable(struct vfe_device *vfe_dev, if (axi_data->src_info[i].active) continue; /* activate the input since it is deactivated */ - axi_data->src_info[i].frame_id = 0; + if (!ext_read) + axi_data->src_info[i].frame_id = 0; vfe_dev->irq_sof_id = 0; if (axi_data->src_info[i].input_mux != EXTERNAL_READ) axi_data->src_info[i].active = 1; @@ -3780,8 +3783,7 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev, return 0; } else if ((vfe_dev->axi_data.src_info[frame_src].active && (frame_id != vfe_dev->axi_data.src_info[frame_src].frame_id + - vfe_dev->axi_data.src_info[frame_src].sof_counter_step)) || - ((!vfe_dev->axi_data.src_info[frame_src].active))) { + vfe_dev->axi_data.src_info[frame_src].sof_counter_step))) { pr_debug("%s:%d invalid frame id %d cur frame id %d pix %d\n", __func__, __LINE__, frame_id, vfe_dev->axi_data.src_info[frame_src].frame_id, diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c index 579d3d9a9cace2191e0c9ce5c7a2b95e6e26edf3..9a9b1b361a4519a865878ca1509b8435991b54c8 100644 --- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -785,7 +785,7 @@ static int msm_vidc_pm_suspend(struct device *dev) static int msm_vidc_pm_resume(struct device *dev) { - place_marker("vidc resumed"); + update_marker("vidc resumed"); dprintk(VIDC_INFO, "%s\n", __func__); return 0; } diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc.c b/drivers/media/platform/msm/vidc_3x/msm_vidc.c index 6c8a7a1f2f03063e1258fe25bf9d0c80728c32a3..3c95203006cd904e95992d6dc242d489c7911e6b 100644 --- a/drivers/media/platform/msm/vidc_3x/msm_vidc.c +++ b/drivers/media/platform/msm/vidc_3x/msm_vidc.c @@ -533,7 +533,7 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b) } dprintk(VIDC_DBG, "%s: [MAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n", - __func__, binfo, i, binfo->smem[i], + __func__, binfo, i, &binfo->smem[i], &binfo->device_addr[i], binfo->fd[i], binfo->buff_off[i], binfo->mapped[i]); } @@ -586,7 +586,7 @@ int unmap_and_deregister_buf(struct msm_vidc_inst *inst, for (i = 0; i < temp->num_planes; i++) { dprintk(VIDC_DBG, "%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n", - __func__, temp, i, temp->smem[i], + __func__, temp, i, &temp->smem[i], &temp->device_addr[i], temp->fd[i], temp->buff_off[i], temp->mapped[i]); /* @@ -906,7 +906,7 @@ int msm_vidc_release_buffers(void *instance, int buffer_type) if (bi->mapped[i]) { dprintk(VIDC_DBG, "%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n", - __func__, bi, i, bi->smem[i], + __func__, bi, i, &bi->smem[i], &bi->device_addr[i], bi->fd[i], bi->buff_off[i], bi->mapped[i]); msm_comm_smem_free(inst, diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c index 2988031d285d4895975cf4490a6aa2e3de6fb301..0047d144c9326ba6ba72a8a052f8ff6a5fd7ec82 100644 --- a/drivers/media/platform/rcar-fcp.c +++ b/drivers/media/platform/rcar-fcp.c @@ -12,6 +12,7 @@ */ #include +#include #include #include #include @@ -24,6 +25,7 @@ struct rcar_fcp_device { struct list_head list; struct device *dev; + struct device_dma_parameters dma_parms; }; static LIST_HEAD(fcp_devices); @@ -139,6 +141,9 @@ static int rcar_fcp_probe(struct platform_device *pdev) fcp->dev = &pdev->dev; + fcp->dev->dma_parms = &fcp->dma_parms; + dma_set_max_seg_size(fcp->dev, DMA_BIT_MASK(32)); + pm_runtime_enable(&pdev->dev); mutex_lock(&fcp_lock); diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c index d8d406c79cfa1e9f160f8ced7acf43823c4806fd..5965e34e36ccecb88e9e13d1e917e841a168140f 100644 --- a/drivers/media/platform/rcar_fdp1.c +++ b/drivers/media/platform/rcar_fdp1.c @@ -2372,7 +2372,7 @@ static int fdp1_probe(struct platform_device *pdev) dprintk(fdp1, "FDP1 Version R-Car H3\n"); break; case FD1_IP_M3N: - dprintk(fdp1, "FDP1 Version R-Car M3N\n"); + dprintk(fdp1, "FDP1 Version R-Car M3-N\n"); break; case FD1_IP_E3: dprintk(fdp1, "FDP1 Version R-Car E3\n"); diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c index e35b1faf0ddc0d68d199be1e5c4dd65e319684b4..c826997f5433039dfc56797e13a744a5697710a4 100644 --- a/drivers/media/tuners/si2157.c +++ b/drivers/media/tuners/si2157.c @@ -84,24 +84,23 @@ static int si2157_init(struct dvb_frontend *fe) struct si2157_cmd cmd; const struct firmware *fw; const char *fw_name; - unsigned int uitmp, chip_id; + unsigned int chip_id, 
xtal_trim; dev_dbg(&client->dev, "\n"); - /* Returned IF frequency is garbage when firmware is not running */ - memcpy(cmd.args, "\x15\x00\x06\x07", 4); + /* Try to get Xtal trim property, to verify tuner still running */ + memcpy(cmd.args, "\x15\x00\x04\x02", 4); cmd.wlen = 4; cmd.rlen = 4; ret = si2157_cmd_execute(client, &cmd); - if (ret) - goto err; - uitmp = cmd.args[2] << 0 | cmd.args[3] << 8; - dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp); + xtal_trim = cmd.args[2] | (cmd.args[3] << 8); - if (uitmp == dev->if_frequency / 1000) + if (ret == 0 && xtal_trim < 16) goto warm; + dev->if_frequency = 0; /* we no longer know current tuner state */ + /* power up */ if (dev->chiptype == SI2157_CHIPTYPE_SI2146) { memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9); diff --git a/drivers/media/usb/dvb-usb/dibusb-mb.c b/drivers/media/usb/dvb-usb/dibusb-mb.c index a0057641cc86838d80d694ca9511e8547a37e480..c55180912c3aec3810de62f62e97b37521588e1b 100644 --- a/drivers/media/usb/dvb-usb/dibusb-mb.c +++ b/drivers/media/usb/dvb-usb/dibusb-mb.c @@ -84,7 +84,7 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap) if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) { err("tuner i2c write failed."); - ret = -EREMOTEIO; + return -EREMOTEIO; } if (adap->fe_adap[0].fe->ops.i2c_gate_ctrl) diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c index c618764480c67eabc0717e6393208087a26b3156..a19c010831249785af70decbe602f7972154e37b 100644 --- a/drivers/media/usb/go7007/snd-go7007.c +++ b/drivers/media/usb/go7007/snd-go7007.c @@ -243,22 +243,18 @@ int go7007_snd_init(struct go7007 *go) gosnd->capturing = 0; ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0, &gosnd->card); - if (ret < 0) { - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_snd; + ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go, &go7007_snd_device_ops); - if (ret < 0) { - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_card; + ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm); - if (ret < 0) { - snd_card_free(gosnd->card); - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_card; + strlcpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver)); strlcpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->driver)); strlcpy(gosnd->card->longname, gosnd->card->shortname, @@ -269,11 +265,8 @@ int go7007_snd_init(struct go7007 *go) &go7007_snd_capture_ops); ret = snd_card_register(gosnd->card); - if (ret < 0) { - snd_card_free(gosnd->card); - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_card; gosnd->substream = NULL; go->snd_context = gosnd; @@ -281,6 +274,12 @@ int go7007_snd_init(struct go7007 *go) ++dev; return 0; + +free_card: + snd_card_free(gosnd->card); +free_snd: + kfree(gosnd); + return ret; } EXPORT_SYMBOL(go7007_snd_init); diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 6ba07c7feb92b37f7cb94a3b0cd45aa52d3e2435..2af7ae13449d32e8327178eac76a71eca873348b 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -118,8 +118,6 @@ int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state); int mptscsih_resume(struct pci_dev *pdev); #endif -#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -2420,7 +2418,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR /* Copy the sense received into the scsi command 
block. */ req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); - memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc)); + memcpy(sc->sense_buffer, sense_data, MPT_SENSE_BUFFER_ALLOC); /* Log SMART data (asc = 0x5D, non-IM case only) if required. */ diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c index 953d0790ffd566e967058f7eeb640ff51377cf45..3259fb82d3c46588dd2c15e2dd31938a2bb9bfce 100644 --- a/drivers/mfd/wm8994-core.c +++ b/drivers/mfd/wm8994-core.c @@ -696,3 +696,4 @@ module_i2c_driver(wm8994_i2c_driver); MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mark Brown "); +MODULE_SOFTDEP("pre: wm8994_regulator"); diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index d8e3cc2dc7470d8deaa3b89a5855b22b034725db..f9caf233e2cc05225b818857b7bdd340c90f4d50 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include @@ -23,7 +23,7 @@ #include "../../sound/soc/atmel/atmel_ssc_dai.h" /* Serialize access to ssc_list and user count */ -static DEFINE_SPINLOCK(user_lock); +static DEFINE_MUTEX(user_lock); static LIST_HEAD(ssc_list); struct ssc_device *ssc_request(unsigned int ssc_num) @@ -31,7 +31,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num) int ssc_valid = 0; struct ssc_device *ssc; - spin_lock(&user_lock); + mutex_lock(&user_lock); list_for_each_entry(ssc, &ssc_list, list) { if (ssc->pdev->dev.of_node) { if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc") @@ -47,18 +47,18 @@ struct ssc_device *ssc_request(unsigned int ssc_num) } if (!ssc_valid) { - spin_unlock(&user_lock); + mutex_unlock(&user_lock); pr_err("ssc: ssc%d platform device is missing\n", ssc_num); return ERR_PTR(-ENODEV); } if (ssc->user) { - spin_unlock(&user_lock); + mutex_unlock(&user_lock); dev_dbg(&ssc->pdev->dev, "module busy\n"); return ERR_PTR(-EBUSY); } ssc->user++; - spin_unlock(&user_lock); + mutex_unlock(&user_lock); clk_prepare(ssc->clk); @@ -70,14 +70,14 @@ void ssc_free(struct ssc_device *ssc) { bool disable_clk = true; - spin_lock(&user_lock); + mutex_lock(&user_lock); if (ssc->user) ssc->user--; else { disable_clk = false; dev_dbg(&ssc->pdev->dev, "device already free\n"); } - spin_unlock(&user_lock); + mutex_unlock(&user_lock); if (disable_clk) clk_unprepare(ssc->clk); @@ -240,9 +240,9 @@ static int ssc_probe(struct platform_device *pdev) return -ENXIO; } - spin_lock(&user_lock); + mutex_lock(&user_lock); list_add_tail(&ssc->list, &ssc_list); - spin_unlock(&user_lock); + mutex_unlock(&user_lock); platform_set_drvdata(pdev, ssc); @@ -261,9 +261,9 @@ static int ssc_remove(struct platform_device *pdev) ssc_sound_dai_remove(ssc); - spin_lock(&user_lock); + mutex_lock(&user_lock); list_del(&ssc->list); - spin_unlock(&user_lock); + mutex_unlock(&user_lock); return 0; } diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 8f6ab516041b471cb95a7b14515cb94c21adb8d3..73a93e3fc18d2dda1ca3c01e2c1c847a5c18b246 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -722,9 +722,8 @@ static int mei_cl_device_remove(struct device *dev) mei_cldev_unregister_callbacks(cldev); module_put(THIS_MODULE); - dev->driver = NULL; - return ret; + return ret; } static ssize_t name_show(struct device *dev, struct device_attribute *a, diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 
be64969d986abcd6e6da6fcec6bb2f019b404a22..391c6d4855ff491eff8104d01e92b4993b6ff2bb 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -276,6 +276,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid) down_write(&dev->me_clients_rwsem); me_cl = __mei_me_cl_by_uuid(dev, uuid); __mei_me_cl_del(dev, me_cl); + mei_me_cl_put(me_cl); up_write(&dev->me_clients_rwsem); } @@ -297,6 +298,7 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id) down_write(&dev->me_clients_rwsem); me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id); __mei_me_cl_del(dev, me_cl); + mei_me_cl_put(me_cl); up_write(&dev->me_clients_rwsem); } diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 66ed5193add49d7f7e9d717305e4ad9df4f39ab1..191ef4d2271bfb27fc6da427c859f94a783dae50 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -847,6 +847,13 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id, ret = __qseecom_scm_call2_locked(smc_id, &desc); break; } + case QSEOS_SOTA_NOTIFICATION_CHECK_STATUS: { + smc_id = TZ_SOTA_UPDATE_NOTIFICATION_ID; + desc.arginfo = TZ_SOTA_UPDATE_NOTIFICATION_ID_PARAM_ID; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } case QSEOS_GENERATE_KEY: { u32 tzbuflen = PAGE_ALIGN(sizeof (struct qseecom_key_generate_ireq) - @@ -2901,8 +2908,9 @@ static int __qseecom_unload_app(struct qseecom_dev_handle *data, sizeof(struct qseecom_unload_app_ireq), &resp, sizeof(resp)); if (ret) { - pr_err("scm_call to unload app (id = %d) failed\n", app_id); - return -EFAULT; + pr_err("scm_call to unload app (id = %d) failed ret: %d\n", + app_id, ret); + return ret; } switch (resp.result) { case QSEOS_RESULT_SUCCESS: @@ -3153,6 +3161,42 @@ int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr, return ret; } +static int __qseecom_process_sota_svc_cmd(struct qseecom_dev_handle *data_ptr, + struct qseecom_send_svc_cmd_req *req_ptr, + struct qseecom_client_send_service_ireq *send_svc_ireq_ptr) +{ + int ret = 0; + void *req_buf = NULL; + + if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) { + pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n", + req_ptr, send_svc_ireq_ptr); + return -EINVAL; + } + + /* Clients need to ensure req_buf is at base offset of shared buffer */ + if ((uintptr_t)req_ptr->cmd_req_buf != + data_ptr->client.user_virt_sb_base) { + pr_err("cmd buf not pointing to base offset of shared buffer\n"); + return -EINVAL; + } + + if (data_ptr->client.sb_length < + sizeof(struct qseecom_rpmb_provision_key)) { + pr_err("shared buffer is too small to hold key type\n"); + return -EINVAL; + } + req_buf = data_ptr->client.sb_virt; + + send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id; + send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len; + send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys( + data_ptr, (uintptr_t)req_ptr->resp_buf)); + send_svc_ireq_ptr->rsp_len = req_ptr->resp_len; + + return ret; +} + int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr, struct qseecom_send_svc_cmd_req *req_ptr, struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr) @@ -3296,7 +3340,6 @@ static int qseecom_send_service_cmd(struct qseecom_dev_handle *data, return -EINVAL; data->type = QSEECOM_SECURE_SERVICE; - switch (req.cmd_id) { case QSEOS_RPMB_PROVISION_KEY_COMMAND: case QSEOS_RPMB_ERASE_COMMAND: @@ -3307,6 +3350,13 @@ static int qseecom_send_service_cmd(struct qseecom_dev_handle 
*data, send_req_ptr)) return -EINVAL; break; + case QSEOS_SOTA_NOTIFICATION_CHECK_STATUS: + send_req_ptr = &send_svc_ireq; + req_buf_size = sizeof(send_svc_ireq); + if (__qseecom_process_sota_svc_cmd(data, &req, + send_req_ptr)) + return -EINVAL; + break; case QSEOS_FSM_LTEOTA_REQ_CMD: case QSEOS_FSM_LTEOTA_REQ_RSP_CMD: case QSEOS_FSM_IKE_REQ_CMD: diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index 4e4c9f1695d32b458d0f88a7e6263bbaa0350bf9..9435832d4777ce09d5717f62b16b82188383f6ae 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -138,3 +138,12 @@ config MMC_SIMULATE_MAX_SPEED control the write or read maximum KB/second speed behaviors. If unsure, say N here. + +config MMC_CRYPTO + bool "MMC Crypto Engine Support" + depends on BLK_INLINE_ENCRYPTION + help + Enable Crypto Engine Support in MMC. + Enabling this makes it possible for the kernel to use the crypto + capabilities of the MMC device (if present) to perform crypto + operations on data being transferred to/from the device. diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile index ffeb27b9c4b189dff0ab54ed4a7b186d803b134d..e5d6a54e8c99c1a25ae648728f1846dc28b6cc2f 100644 --- a/drivers/mmc/core/Makefile +++ b/drivers/mmc/core/Makefile @@ -19,3 +19,4 @@ obj-$(CONFIG_MMC_BLOCK) += mmc_block.o mmc_block-objs := block.o queue.o obj-$(CONFIG_MMC_TEST) += mmc_test.o obj-$(CONFIG_SDIO_UART) += sdio_uart.o +mmc_core-$(CONFIG_MMC_CRYPTO) += crypto.o diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index eb4c3913db621af4ddda0caaa9003b3180bff7ee..b1f06a15c6234ed58fa3ca68e2ce883354ffe81c 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -54,6 +54,7 @@ #include "block.h" #include "core.h" #include "card.h" +#include "crypto.h" #include "host.h" #include "bus.h" #include "mmc_ops.h" @@ -2129,6 +2130,8 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, memset(brq, 0, sizeof(struct mmc_blk_request)); + mmc_crypto_prepare_req(mqrq); + brq->mrq.data = &brq->data; brq->stop.opcode = MMC_STOP_TRANSMISSION; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 4ef30120be4df7176466f4a23cf5763ea77cd542..412a81fdf3ad71c13ea180250e2243bb28350e5b 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -515,7 +515,7 @@ static int mmc_devfreq_set_target(struct device *dev, *freq, current->comm); spin_lock_bh(&clk_scaling->lock); - if (clk_scaling->target_freq == *freq || + if (clk_scaling->curr_freq == *freq || clk_scaling->skip_clk_scale_freq_update) { spin_unlock_bh(&clk_scaling->lock); goto out; diff --git a/drivers/mmc/core/crypto.c b/drivers/mmc/core/crypto.c new file mode 100644 index 0000000000000000000000000000000000000000..661e7f862ffce49a3a25fb0547294fb78b86db99 --- /dev/null +++ b/drivers/mmc/core/crypto.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2020 Google LLC + */ + +#include +#include +#include +#include + +#include "core.h" +#include "queue.h" + +void mmc_crypto_setup_queue(struct mmc_host *host, struct request_queue *q) +{ + if (host->caps2 & MMC_CAP2_CRYPTO) + q->ksm = host->ksm; +} +EXPORT_SYMBOL_GPL(mmc_crypto_setup_queue); + +void mmc_crypto_free_host(struct mmc_host *host) +{ + keyslot_manager_destroy(host->ksm); +} + +void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq) +{ + struct request *req = mmc_queue_req_to_req(mqrq); + struct mmc_request *mrq = &mqrq->brq.mrq; + const struct bio_crypt_ctx *bc; + + if (!bio_crypt_should_process(req)) + return; + + bc 
= req->bio->bi_crypt_context; + mrq->crypto_key_slot = bc->bc_keyslot; + mrq->data_unit_num = bc->bc_dun[0]; + mrq->crypto_key = bc->bc_key; +} +EXPORT_SYMBOL_GPL(mmc_crypto_prepare_req); diff --git a/drivers/mmc/core/crypto.h b/drivers/mmc/core/crypto.h new file mode 100644 index 0000000000000000000000000000000000000000..74145c36241b6936186f6582795af2c165b27b24 --- /dev/null +++ b/drivers/mmc/core/crypto.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2020 Google LLC + */ + +#ifndef _MMC_CORE_CRYPTO_H +#define _MMC_CORE_CRYPTO_H + +struct mmc_host; +struct mmc_queue_req; +struct request; +struct request_queue; + +#ifdef CONFIG_MMC_CRYPTO + +void mmc_crypto_setup_queue(struct mmc_host *host, struct request_queue *q); + +void mmc_crypto_free_host(struct mmc_host *host); + +void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq); + +#else /* CONFIG_MMC_CRYPTO */ + +static inline void mmc_crypto_setup_queue(struct mmc_host *host, + struct request_queue *q) { } + +static inline void mmc_crypto_free_host(struct mmc_host *host) { } + +static inline void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq) { } + +#endif /* CONFIG_MMC_CRYPTO */ + +#endif /* _MMC_CORE_CRYPTO_H */ diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 9eed28afe3b887aee9c7855e62f0a92a134a2c25..7ba6c5515d213720e2d9b336737a9bcc4f7ac0bb 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -30,6 +30,7 @@ #include #include "core.h" +#include "crypto.h" #include "host.h" #include "slot-gpio.h" #include "pwrseq.h" @@ -478,6 +479,16 @@ int mmc_retune(struct mmc_host *host) host->ops->prepare_hs400_tuning(host, &host->ios); } + /* + * Timing should be adjusted to the HS400 target + * operation frequency for tuning process. + * Similar handling is also done in mmc_hs200_tuning() + * This is handled properly in sdhci-msm.c from msm-5.4 onwards. + */ + if (host->card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 && + host->ios.bus_width == MMC_BUS_WIDTH_8) + mmc_set_timing(host, MMC_TIMING_MMC_HS400); + err = mmc_execute_tuning(host->card); if (err) goto out; @@ -1047,6 +1058,7 @@ EXPORT_SYMBOL(mmc_remove_host); */ void mmc_free_host(struct mmc_host *host) { + mmc_crypto_free_host(host); mmc_pwrseq_free(host); put_device(&host->class_dev); } diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index ba338d2a1c004dd464a38c645b4703d2c303179c..0256b69cb6336644c3d7853e533193c6aec74eb8 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -27,6 +27,7 @@ #include "queue.h" #include "block.h" #include "core.h" +#include "crypto.h" #include "card.h" /* @@ -493,6 +494,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, goto cleanup_queue; } + mmc_crypto_setup_queue(host, mq->queue); return 0; cleanup_queue: diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 0e82bc0318219dcbd9c964ae36216a30fb27e2ef..87987d40943b86e8770d934b7173e60ae7c07aaf 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -803,9 +803,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, /* Retry init sequence, but without R4_18V_PRESENT. 
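/*
 * Illustrative sketch only, not part of the patch above: with
 * CONFIG_MMC_CRYPTO enabled, mmc_crypto_prepare_req() copies the keyslot,
 * data unit number and key from the request's bio_crypt_ctx into the
 * mmc_request. A host driver that sets MMC_CAP2_CRYPTO could then program
 * its inline crypto engine from those fields roughly as below;
 * mmc_example_program_ice() is a hypothetical helper, not a real API.
 */
#include <linux/mmc/host.h>

int mmc_example_program_ice(struct mmc_host *host, int slot, u64 dun); /* hypothetical */

static int mmc_example_prepare_hw_crypto(struct mmc_host *host,
					 struct mmc_request *mrq)
{
	/* Requests without an inline encryption context need no programming. */
	if (!mrq->crypto_key)
		return 0;

	/* Hand the keyslot and starting DUN to the (hypothetical) engine. */
	return mmc_example_program_ice(host, mrq->crypto_key_slot,
				       mrq->data_unit_num);
}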
*/ retries = 0; goto try_again; - } else { - goto remove; } + return err; } #ifdef CONFIG_MMC_EMBEDDED_SDIO } diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index f361eed351809c9a9a9d53fd123469abe810d7d9..f6299ddb0e859929a0a645400f1b9c5e533eb152 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -930,3 +930,9 @@ config MMC_CQ_HCI_CRYPTO_QTI Enable Vendor Crypto Engine Support in CQHCI Enabling this allows kernel to use CQHCI crypto operations defined and implemented by QTI. + +config MMC_QTI_NONCMDQ_ICE + bool "Vendor support Legacy Inline Crypto Engine" + default n + help + Enable to allow use of Legacy Inline Crypto Engine i.e ICE < 3.0. diff --git a/drivers/mmc/host/cmdq_hci-crypto-qti.c b/drivers/mmc/host/cmdq_hci-crypto-qti.c index d145339b70f061aa0e4da945f93927748e561f6c..873c436e4b09f0adbb3a16283b59213ed927e030 100644 --- a/drivers/mmc/host/cmdq_hci-crypto-qti.c +++ b/drivers/mmc/host/cmdq_hci-crypto-qti.c @@ -19,6 +19,12 @@ #include "sdhci-msm.h" #include "cmdq_hci-crypto-qti.h" #include +#include +#include +#if IS_ENABLED(CONFIG_CRYPTO_DEV_QCOM_ICE) +#include +#include +#endif #define RAW_SECRET_SIZE 32 #define MINIMUM_DUN_SIZE 512 @@ -30,8 +36,13 @@ static struct cmdq_host_crypto_variant_ops cmdq_crypto_qti_variant_ops = { .disable = cmdq_crypto_qti_disable, .resume = cmdq_crypto_qti_resume, .debug = cmdq_crypto_qti_debug, + .reset = cmdq_crypto_qti_reset, + .prepare_crypto_desc = cmdq_crypto_qti_prep_desc, }; +static atomic_t keycache; +static bool cmdq_use_default_du_size; + static bool ice_cap_idx_valid(struct cmdq_host *host, unsigned int cap_idx) { @@ -40,12 +51,19 @@ static bool ice_cap_idx_valid(struct cmdq_host *host, static uint8_t get_data_unit_size_mask(unsigned int data_unit_size) { + unsigned int du_size; + if (data_unit_size < MINIMUM_DUN_SIZE || data_unit_size > MAXIMUM_DUN_SIZE || !is_power_of_2(data_unit_size)) return 0; - return data_unit_size / MINIMUM_DUN_SIZE; + if (cmdq_use_default_du_size) + du_size = MINIMUM_DUN_SIZE; + else + du_size = data_unit_size; + + return du_size / MINIMUM_DUN_SIZE; } @@ -68,10 +86,14 @@ void cmdq_crypto_qti_enable(struct cmdq_host *host) void cmdq_crypto_qti_disable(struct cmdq_host *host) { - /* cmdq_crypto_disable_spec(host) and - * crypto_qti_disable(host->crypto_vops->priv) - * are needed here? 
- */ + cmdq_crypto_disable_spec(host); + crypto_qti_disable(host->crypto_vops->priv); +} + +int cmdq_crypto_qti_reset(struct cmdq_host *host) +{ + atomic_set(&keycache, 0); + return 0; } static int cmdq_crypto_qti_keyslot_program(struct keyslot_manager *ksm, @@ -86,9 +108,12 @@ static int cmdq_crypto_qti_keyslot_program(struct keyslot_manager *ksm, crypto_alg_id = cmdq_crypto_cap_find(host, key->crypto_mode, key->data_unit_size); + pm_runtime_get_sync(&host->mmc->card->dev); + if (!cmdq_is_crypto_enabled(host) || !cmdq_keyslot_valid(host, slot) || !ice_cap_idx_valid(host, crypto_alg_id)) { + pm_runtime_put_sync(&host->mmc->card->dev); return -EINVAL; } @@ -96,6 +121,7 @@ static int cmdq_crypto_qti_keyslot_program(struct keyslot_manager *ksm, if (!(data_unit_mask & host->crypto_cap_array[crypto_alg_id].sdus_mask)) { + pm_runtime_put_sync(&host->mmc->card->dev); return -EINVAL; } @@ -104,6 +130,8 @@ static int cmdq_crypto_qti_keyslot_program(struct keyslot_manager *ksm, if (err) pr_err("%s: failed with error %d\n", __func__, err); + pm_runtime_put_sync(&host->mmc->card->dev); + return err; } @@ -112,16 +140,25 @@ static int cmdq_crypto_qti_keyslot_evict(struct keyslot_manager *ksm, unsigned int slot) { int err = 0; + int val = 0; struct cmdq_host *host = keyslot_manager_private(ksm); + pm_runtime_get_sync(&host->mmc->card->dev); + if (!cmdq_is_crypto_enabled(host) || - !cmdq_keyslot_valid(host, slot)) + !cmdq_keyslot_valid(host, slot)) { + pm_runtime_put_sync(&host->mmc->card->dev); return -EINVAL; + } err = crypto_qti_keyslot_evict(host->crypto_vops->priv, slot); if (err) pr_err("%s: failed with error %d\n", __func__, err); + pm_runtime_put_sync(&host->mmc->card->dev); + val = atomic_read(&keycache) & ~(1 << slot); + atomic_set(&keycache, val); + return err; } @@ -156,6 +193,63 @@ enum blk_crypto_mode_num cmdq_blk_crypto_qti_mode_num_for_alg_dusize( return BLK_ENCRYPTION_MODE_INVALID; } +#if IS_ENABLED(CONFIG_MMC_QTI_NONCMDQ_ICE) +int cmdq_host_init_crypto_qti_spec(struct cmdq_host *host, + const struct keyslot_mgmt_ll_ops *ksm_ops) +{ + int err = 0; + unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX]; + enum blk_crypto_mode_num blk_mode_num; + + host->crypto_capabilities.reg_val = LEGACY_ICE_CAP_VAL; + host->crypto_cfg_register = (u32)host->icemmio; + host->crypto_cap_array = + devm_kcalloc(mmc_dev(host->mmc), + host->crypto_capabilities.num_crypto_cap, + sizeof(host->crypto_cap_array[0]), GFP_KERNEL); + if (!host->crypto_cap_array) { + err = -ENOMEM; + pr_err("%s failed to allocate memory\n", __func__); + goto out; + } + memset(crypto_modes_supported, 0, sizeof(crypto_modes_supported)); + + host->crypto_cap_array[CRYPTO_ICE_INDEX].algorithm_id = + CMDQ_CRYPTO_ALG_AES_XTS; + host->crypto_cap_array[CRYPTO_ICE_INDEX].key_size = + CMDQ_CRYPTO_KEY_SIZE_256; + + blk_mode_num = cmdq_blk_crypto_qti_mode_num_for_alg_dusize( + host->crypto_cap_array[CRYPTO_ICE_INDEX].algorithm_id, + host->crypto_cap_array[CRYPTO_ICE_INDEX].key_size); + + crypto_modes_supported[blk_mode_num] |= CRYPTO_CDU_SIZE * 512; + + host->ksm = keyslot_manager_create(cmdq_num_keyslots(host), ksm_ops, + BLK_CRYPTO_FEATURE_STANDARD_KEYS | + BLK_CRYPTO_FEATURE_WRAPPED_KEYS, + crypto_modes_supported, host); + + if (!host->ksm) { + err = -ENOMEM; + goto out; + } + keyslot_manager_set_max_dun_bytes(host->ksm, sizeof(u32)); + + /* + * In case host controller supports cryptographic operations + * then, it uses 128bit task descriptor. Upper 64 bits of task + * descriptor would be used to pass crypto specific informaton. 
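/*
 * Illustrative sketch only: the QTI CQHCI crypto hunks above keep an
 * atomic_t bitmap ("keycache") of keyslots that have already been
 * programmed, so cmdq_crypto_qti_prep_desc() can skip redundant keyslot
 * programming; the bit is cleared again on evict and on reset. The
 * simplified pattern, with a hypothetical example_program_slot() helper:
 */
#include <linux/atomic.h>
#include <linux/bits.h>

int example_program_slot(unsigned int slot); /* hypothetical */

static atomic_t example_keycache = ATOMIC_INIT(0);

static int example_use_slot(unsigned int slot)
{
	int ret;

	if (atomic_read(&example_keycache) & BIT(slot))
		return 0; /* already programmed */

	ret = example_program_slot(slot);
	if (ret)
		return ret;

	/* As in the driver, the update is a plain read-modify-write pair. */
	atomic_set(&example_keycache,
		   atomic_read(&example_keycache) | BIT(slot));
	return 0;
}

static void example_evict_slot(unsigned int slot)
{
	atomic_set(&example_keycache,
		   atomic_read(&example_keycache) & ~BIT(slot));
}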
+ */ + host->caps |= CMDQ_TASK_DESC_SZ_128; + + return 0; +out: + /* Indicate that init failed by setting crypto_capabilities to 0 */ + host->crypto_capabilities.reg_val = 0; + return err; +} +#else int cmdq_host_init_crypto_qti_spec(struct cmdq_host *host, const struct keyslot_mgmt_ll_ops *ksm_ops) { @@ -237,6 +331,7 @@ int cmdq_host_init_crypto_qti_spec(struct cmdq_host *host, host->crypto_capabilities.reg_val = 0; return err; } +#endif int cmdq_crypto_qti_init_crypto(struct cmdq_host *host, const struct keyslot_mgmt_ll_ops *ksm_ops) @@ -280,6 +375,79 @@ int cmdq_crypto_qti_init_crypto(struct cmdq_host *host, return err; } +int cmdq_crypto_qti_prep_desc(struct cmdq_host *host, struct mmc_request *mrq, + u64 *ice_ctx) +{ + struct bio_crypt_ctx *bc; + struct request *req = mrq->req; + int ret = 0; + int val = 0; +#if IS_ENABLED(CONFIG_CRYPTO_DEV_QCOM_ICE) + struct ice_data_setting setting; + bool bypass = true; + short key_index = 0; +#endif + + *ice_ctx = 0; + if (!req || !req->bio) + return ret; + + if (!bio_crypt_should_process(req)) { +#if IS_ENABLED(CONFIG_CRYPTO_DEV_QCOM_ICE) + ret = qcom_ice_config_start(req, &setting); + if (!ret) { + key_index = setting.crypto_data.key_index; + bypass = (rq_data_dir(req) == WRITE) ? + setting.encr_bypass : setting.decr_bypass; + *ice_ctx = DATA_UNIT_NUM(req->__sector) | + CRYPTO_CONFIG_INDEX(key_index) | + CRYPTO_ENABLE(!bypass); + } else { + pr_err("%s crypto config failed err = %d\n", __func__, + ret); + } +#endif + return ret; + } + if (WARN_ON(!cmdq_is_crypto_enabled(host))) { + /* + * Upper layer asked us to do inline encryption + * but that isn't enabled, so we fail this request. + */ + return -EINVAL; + } + + bc = req->bio->bi_crypt_context; + + if (!cmdq_keyslot_valid(host, bc->bc_keyslot)) + return -EINVAL; + + if (!(atomic_read(&keycache) & (1 << bc->bc_keyslot))) { + if (bc->is_ext4) + cmdq_use_default_du_size = true; + + ret = cmdq_crypto_qti_keyslot_program(host->ksm, bc->bc_key, + bc->bc_keyslot); + if (ret) { + pr_err("%s keyslot program failed %d\n", __func__, ret); + return ret; + } + val = atomic_read(&keycache) | (1 << bc->bc_keyslot); + atomic_set(&keycache, val); + } + + if (ice_ctx) { + if (bc->is_ext4) + *ice_ctx = DATA_UNIT_NUM(req->__sector); + else + *ice_ctx = DATA_UNIT_NUM(bc->bc_dun[0]); + + *ice_ctx = *ice_ctx | CRYPTO_CONFIG_INDEX(bc->bc_keyslot) | + CRYPTO_ENABLE(true); + } + return 0; +} + int cmdq_crypto_qti_debug(struct cmdq_host *host) { return crypto_qti_debug(host->crypto_vops->priv); diff --git a/drivers/mmc/host/cmdq_hci-crypto-qti.h b/drivers/mmc/host/cmdq_hci-crypto-qti.h index e63465bca3e283154d8cc37042ea7bb952f24cb1..a8682599b384b5383f01ee02d2be92590f02588f 100644 --- a/drivers/mmc/host/cmdq_hci-crypto-qti.h +++ b/drivers/mmc/host/cmdq_hci-crypto-qti.h @@ -15,6 +15,12 @@ #include "cmdq_hci-crypto.h" +#if IS_ENABLED(CONFIG_MMC_QTI_NONCMDQ_ICE) +#define CRYPTO_CDU_SIZE 0xFF +#define CRYPTO_ICE_INDEX 3 +#define LEGACY_ICE_CAP_VAL 0x50001F06 +#endif + void cmdq_crypto_qti_enable(struct cmdq_host *host); void cmdq_crypto_qti_disable(struct cmdq_host *host); @@ -30,4 +36,10 @@ void cmdq_crypto_qti_set_vops(struct cmdq_host *host); int cmdq_crypto_qti_resume(struct cmdq_host *host); +int cmdq_crypto_qti_prep_desc(struct cmdq_host *host, + struct mmc_request *mrq, + u64 *ice_ctx); + +int cmdq_crypto_qti_reset(struct cmdq_host *host); + #endif /* _CMDQ_HCI_CRYPTO_QTI_H */ diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 
8c0b80a54e4debe3cd28de7fbfc9d9837a00f067..6d1ac9443eb2233a97f300538120a52e3bdab44b 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -79,7 +79,7 @@ #define ESDHC_STD_TUNING_EN (1 << 24) /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ #define ESDHC_TUNING_START_TAP_DEFAULT 0x1 -#define ESDHC_TUNING_START_TAP_MASK 0xff +#define ESDHC_TUNING_START_TAP_MASK 0x7f #define ESDHC_TUNING_STEP_MASK 0x00070000 #define ESDHC_TUNING_STEP_SHIFT 16 diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 29a87b31b6de51b81bcb83684b220610b7c6a04d..43bc6dca537dd5ba3ad2ad27cfa56e073c3bb059 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1227,6 +1227,12 @@ int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode) (ios.timing == MMC_TIMING_UHS_SDR104))) return 0; + /* + * Clear tuning_done flag before tuning to ensure proper + * HS400 settings. + */ + msm_host->tuning_done = 0; + /* * Don't allow re-tuning for CRC errors observed for any commands * that are sent during tuning sequence itself. diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index b1532938eae127164181b741aed153468cafa459..81dd1286f2ccb066bde34da116dfe1c741813d13 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -180,7 +180,7 @@ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) u32 present; if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || - !mmc_card_is_removable(host->mmc)) + !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc)) return; if (enable) { @@ -1168,7 +1168,7 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) WARN_ON(i >= SDHCI_MAX_MRQS); - tasklet_schedule(&host->finish_tasklet); + tasklet_hi_schedule(&host->finish_tasklet); } static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index a838bf5480d888935aeb79c5a1c3fcb734e07ad0..a863a345fc59b366dfb85bf1b59a8cddad84ebd1 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -323,6 +323,8 @@ struct via_crdr_mmc_host { /* some devices need a very long delay for power to stabilize */ #define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001 +#define VIA_CMD_TIMEOUT_MS 1000 + static const struct pci_device_id via_ids[] = { {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, @@ -555,14 +557,17 @@ static void via_sdc_send_command(struct via_crdr_mmc_host *host, { void __iomem *addrbase; struct mmc_data *data; + unsigned int timeout_ms; u32 cmdctrl = 0; WARN_ON(host->cmd); data = cmd->data; - mod_timer(&host->timer, jiffies + HZ); host->cmd = cmd; + timeout_ms = cmd->busy_timeout ? 
cmd->busy_timeout : VIA_CMD_TIMEOUT_MS; + mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms)); + /*Command index*/ cmdctrl = cmd->opcode << 8; diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c index dcec9cf4983f812833bc9082eee7dc383c913eff..e22a9ffa9cb736a7cdc25fd80e040f8ab34a3265 100644 --- a/drivers/mtd/nand/ams-delta.c +++ b/drivers/mtd/nand/ams-delta.c @@ -263,7 +263,7 @@ static int ams_delta_cleanup(struct platform_device *pdev) void __iomem *io_base = platform_get_drvdata(pdev); /* Release resources, unregister device */ - nand_release(ams_delta_mtd); + nand_release(mtd_to_nand(ams_delta_mtd)); gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio)); gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB); diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index 9d4a28fa6b73b2aaf17591ba57d5cf74b67d1aa7..99c738be2545d062df9ed3e5ba9d93fa34913dce 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c @@ -496,7 +496,7 @@ static int au1550nd_remove(struct platform_device *pdev) struct au1550nd_ctx *ctx = platform_get_drvdata(pdev); struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - nand_release(nand_to_mtd(&ctx->chip)); + nand_release(&ctx->chip); iounmap(ctx->base); release_mem_region(r->start, 0x1000); kfree(ctx); diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c index fb31429b70a9a2428d45daee8e9c3c45b3f2b542..d79694160845760f03fa59a40173e348756e3418 100644 --- a/drivers/mtd/nand/bcm47xxnflash/main.c +++ b/drivers/mtd/nand/bcm47xxnflash/main.c @@ -65,7 +65,7 @@ static int bcm47xxnflash_remove(struct platform_device *pdev) { struct bcm47xxnflash *nflash = platform_get_drvdata(pdev); - nand_release(nand_to_mtd(&nflash->nand_chip)); + nand_release(&nflash->nand_chip); return 0; } diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c index 5655dca6ce434769b96e30fb4c59168267f6c461..ebcbcbaa8b54712a194ea63257ef42da0f5af669 100644 --- a/drivers/mtd/nand/bf5xx_nand.c +++ b/drivers/mtd/nand/bf5xx_nand.c @@ -688,7 +688,7 @@ static int bf5xx_nand_remove(struct platform_device *pdev) * and their partitions, then go through freeing the * resources used */ - nand_release(nand_to_mtd(&info->chip)); + nand_release(&info->chip); peripheral_free_list(bfin_nfc_pin_req); bf5xx_nand_dma_remove(info); diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c index 2a978d9832a793a433964b180b0aedc4be7f111f..c65724d0c725d3947f85685f5ab432126b10cf84 100644 --- a/drivers/mtd/nand/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/brcmnand/brcmnand.c @@ -491,8 +491,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) } else { ctrl->cs_offsets = brcmnand_cs_offsets; - /* v5.0 and earlier has a different CS0 offset layout */ - if (ctrl->nand_version <= 0x0500) + /* v3.3-5.0 have a different CS0 offset layout */ + if (ctrl->nand_version >= 0x0303 && + ctrl->nand_version <= 0x0500) ctrl->cs0_offsets = brcmnand_cs_offsets_cs0; } @@ -911,11 +912,14 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section, if (!section) { /* * Small-page NAND use byte 6 for BBI while large-page - * NAND use byte 0. + * NAND use bytes 0 and 1. 
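/*
 * Illustrative sketch only: the brcmnand hunk here widens the reserved area
 * of the Hamming OOB "free" region because large-page NAND keeps its
 * bad-block indicator in OOB bytes 0 and 1 (not just byte 0), so the first
 * free region must start two bytes in. A simplified, hypothetical callback
 * showing only that rule (the real driver also carves out ECC bytes):
 */
#include <linux/mtd/mtd.h>

static int example_large_page_ooblayout_free(struct mtd_info *mtd, int section,
					     struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	/* Bytes 0 and 1 hold the bad-block indicator on large-page NAND. */
	oobregion->offset = 2;
	oobregion->length = mtd->oobsize - 2;
	return 0;
}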
*/ - if (cfg->page_size > 512) - oobregion->offset++; - oobregion->length--; + if (cfg->page_size > 512) { + oobregion->offset += 2; + oobregion->length -= 2; + } else { + oobregion->length--; + } } } @@ -2594,7 +2598,7 @@ int brcmnand_remove(struct platform_device *pdev) struct brcmnand_host *host; list_for_each_entry(host, &ctrl->host_list, node) - nand_release(nand_to_mtd(&host->chip)); + nand_release(&host->chip); clk_disable_unprepare(ctrl->clk); diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index bc558c438a57411dfd94ceebb9f8eb117b270292..98c013094fa238e317f72f72c58016dbcf3b94c6 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c @@ -826,7 +826,7 @@ static void cafe_nand_remove(struct pci_dev *pdev) /* Disable NAND IRQ in global IRQ mask register */ cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK); free_irq(pdev->irq, mtd); - nand_release(mtd); + nand_release(chip); free_rs(cafe->rs); pci_iounmap(pdev, cafe->mmio); dma_free_coherent(&cafe->pdev->dev, diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c index 1fc435f994e1eca9738b6d28c1b5adb8deb367d5..7b824ae88ab013e08eb6352b8ef5a0629019de30 100644 --- a/drivers/mtd/nand/cmx270_nand.c +++ b/drivers/mtd/nand/cmx270_nand.c @@ -230,7 +230,7 @@ module_init(cmx270_init); static void __exit cmx270_cleanup(void) { /* Release resources, unregister device */ - nand_release(cmx270_nand_mtd); + nand_release(mtd_to_nand(cmx270_nand_mtd)); gpio_free(GPIO_NAND_RB); gpio_free(GPIO_NAND_CS); diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c index d48877540f14477a3892080257a98d87b603e690..647d4ee693457233e120358349d4f521cdfbed9f 100644 --- a/drivers/mtd/nand/cs553x_nand.c +++ b/drivers/mtd/nand/cs553x_nand.c @@ -338,7 +338,7 @@ static void __exit cs553x_cleanup(void) mmio_base = this->IO_ADDR_R; /* Release resources, unregister device */ - nand_release(mtd); + nand_release(this); kfree(mtd->name); cs553x_mtd[i] = NULL; diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index ccc8c43abcff3ec04ec8c55a47b92e66b241346b..e66f1385b49e8f0186f769e44155ad5be7bc6114 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -854,7 +854,7 @@ static int nand_davinci_remove(struct platform_device *pdev) ecc4_busy = false; spin_unlock_irq(&davinci_nand_lock); - nand_release(nand_to_mtd(&info->chip)); + nand_release(&info->chip); clk_disable_unprepare(info->clk); diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 3087b0ba7b7f3d708f92cf58ea838bb7685d3edb..390a18ad68eeaad485b1c03f5a3d5fe1f4d2f7c2 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -1444,9 +1444,7 @@ EXPORT_SYMBOL(denali_init); /* driver exit point */ void denali_remove(struct denali_nand_info *denali) { - struct mtd_info *mtd = nand_to_mtd(&denali->nand); - - nand_release(mtd); + nand_release(&denali->nand); kfree(denali->buf); denali_disable_irq(denali); } diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c index c3aa53caab5cfe539def22e4dc3302afd6a0b67f..4f48a9b4f9e3f3edc72adef8011c03f533430269 100644 --- a/drivers/mtd/nand/diskonchip.c +++ b/drivers/mtd/nand/diskonchip.c @@ -1605,13 +1605,10 @@ static int __init doc_probe(unsigned long physadr) numchips = doc2001_init(mtd); if ((ret = nand_scan(mtd, numchips)) || (ret = doc->late_init(mtd))) { - /* DBB note: i believe nand_release is necessary here, as + /* DBB note: i believe nand_cleanup is necessary 
here, as buffers may have been allocated in nand_base. Check with Thomas. FIX ME! */ - /* nand_release will call mtd_device_unregister, but we - haven't yet added it. This is handled without incident by - mtd_device_unregister, as far as I can tell. */ - nand_release(mtd); + nand_cleanup(nand); kfree(nand); goto fail; } @@ -1644,7 +1641,7 @@ static void release_nanddoc(void) doc = nand_get_controller_data(nand); nextmtd = doc->nextdoc; - nand_release(mtd); + nand_release(nand); iounmap(doc->virtadr); release_mem_region(doc->physadr, DOC_IOREMAP_LEN); kfree(nand); diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c index 2436cbc71662dc0bd4dbd7fed5d702b557634792..53fdf491d8c00a4ad02b780a66335371a73d3b20 100644 --- a/drivers/mtd/nand/docg4.c +++ b/drivers/mtd/nand/docg4.c @@ -1376,7 +1376,7 @@ static int __init probe_docg4(struct platform_device *pdev) return 0; fail: - nand_release(mtd); /* deletes partitions and mtd devices */ + nand_release(nand); /* deletes partitions and mtd devices */ free_bch(doc->bch); kfree(nand); @@ -1389,7 +1389,7 @@ static int __init probe_docg4(struct platform_device *pdev) static int __exit cleanup_docg4(struct platform_device *pdev) { struct docg4_priv *doc = platform_get_drvdata(pdev); - nand_release(doc->mtd); + nand_release(mtd_to_nand(doc->mtd)); free_bch(doc->bch); kfree(mtd_to_nand(doc->mtd)); iounmap(doc->virtadr); diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index 17db2f90aa2c33d4c4b6b3da53e4762fdd8e37bd..0f70bd961234e8e3c91c24b10cf762af0e5b79fd 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c @@ -813,7 +813,7 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv) struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand; struct mtd_info *mtd = nand_to_mtd(&priv->chip); - nand_release(mtd); + nand_release(&priv->chip); kfree(mtd->name); diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index 16deba1a2385876ae8002eab2c6df14f1294a576..0d492816143978811088dd9b8971123e7669cf79 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ b/drivers/mtd/nand/fsl_ifc_nand.c @@ -927,7 +927,7 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv) { struct mtd_info *mtd = nand_to_mtd(&priv->chip); - nand_release(mtd); + nand_release(&priv->chip); kfree(mtd->name); diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c index a88e2cf66e0f69a0d5d94ea4d1340701c82884ec..009e96fb92ae2879ed69e65e35a8020af651e162 100644 --- a/drivers/mtd/nand/fsl_upm.c +++ b/drivers/mtd/nand/fsl_upm.c @@ -326,7 +326,7 @@ static int fun_remove(struct platform_device *ofdev) struct mtd_info *mtd = nand_to_mtd(&fun->chip); int i; - nand_release(mtd); + nand_release(&fun->chip); kfree(mtd->name); for (i = 0; i < fun->mchip_count; i++) { diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index eac15d9bf49eb79ed1be86bd868ee8c2711105f4..3be80e15e40021b415e961cab4b863232f608cd6 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c @@ -1118,7 +1118,7 @@ static int fsmc_nand_remove(struct platform_device *pdev) struct fsmc_nand_data *host = platform_get_drvdata(pdev); if (host) { - nand_release(nand_to_mtd(&host->nand)); + nand_release(&host->nand); if (host->mode == USE_DMA_ACCESS) { dma_release_channel(host->write_dma_chan); diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c index fd3648952b5a70281ce9e7ac596facdfe4ddfd04..81b02b81e98448949dbf295ea9d7b34ffd83af34 100644 --- a/drivers/mtd/nand/gpio.c +++ 
b/drivers/mtd/nand/gpio.c @@ -199,7 +199,7 @@ static int gpio_nand_remove(struct platform_device *pdev) { struct gpiomtd *gpiomtd = platform_get_drvdata(pdev); - nand_release(nand_to_mtd(&gpiomtd->nand_chip)); + nand_release(&gpiomtd->nand_chip); if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) gpio_set_value(gpiomtd->plat.gpio_nwp, 0); diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index d4d824ef64e9fb395af3bc549daae72b96731e16..c7d0d2eed6c25d1366541cf6f6c1182a49ebfa4e 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -2135,7 +2135,7 @@ static int gpmi_nand_remove(struct platform_device *pdev) { struct gpmi_nand_data *this = platform_get_drvdata(pdev); - nand_release(nand_to_mtd(&this->nand)); + nand_release(&this->nand); gpmi_free_dma_buffer(this); release_resources(this); return 0; diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c index d9ee1a7e695636b21f13236002903ae7dfe0249a..1d1b541489f843e529c1a6b0e32a30c8b38920f0 100644 --- a/drivers/mtd/nand/hisi504_nand.c +++ b/drivers/mtd/nand/hisi504_nand.c @@ -823,7 +823,7 @@ static int hisi_nfc_probe(struct platform_device *pdev) return 0; err_mtd: - nand_release(mtd); + nand_release(chip); err_res: return ret; } @@ -831,9 +831,8 @@ static int hisi_nfc_probe(struct platform_device *pdev) static int hisi_nfc_remove(struct platform_device *pdev) { struct hinfc_host *host = platform_get_drvdata(pdev); - struct mtd_info *mtd = nand_to_mtd(&host->chip); - nand_release(mtd); + nand_release(&host->chip); return 0; } diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c index ad827d4af3e9bd6dbb229a07c9992abe89b4ce71..62ddea88edce5b8829b9ea38f5606630b2bdb16e 100644 --- a/drivers/mtd/nand/jz4740_nand.c +++ b/drivers/mtd/nand/jz4740_nand.c @@ -480,7 +480,7 @@ static int jz_nand_probe(struct platform_device *pdev) return 0; err_nand_release: - nand_release(mtd); + nand_release(chip); err_unclaim_banks: while (chipnr--) { unsigned char bank = nand->banks[chipnr]; @@ -500,7 +500,7 @@ static int jz_nand_remove(struct platform_device *pdev) struct jz_nand *nand = platform_get_drvdata(pdev); size_t i; - nand_release(nand_to_mtd(&nand->chip)); + nand_release(&nand->chip); /* Deassert and disable all chips */ writel(0, nand->base + JZ_REG_NAND_CTRL); diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c index e69f6ae4c53952f0c8d6798c9c0dda5e49057f06..86ff46eb792561d13d8bc4ed8d24709f9ba9d31a 100644 --- a/drivers/mtd/nand/jz4780_nand.c +++ b/drivers/mtd/nand/jz4780_nand.c @@ -293,7 +293,7 @@ static int jz4780_nand_init_chip(struct platform_device *pdev, ret = mtd_device_register(mtd, NULL, 0); if (ret) { - nand_release(mtd); + nand_release(chip); return ret; } @@ -308,7 +308,7 @@ static void jz4780_nand_cleanup_chips(struct jz4780_nand_controller *nfc) while (!list_empty(&nfc->chips)) { chip = list_first_entry(&nfc->chips, struct jz4780_nand_chip, chip_list); - nand_release(nand_to_mtd(&chip->chip)); + nand_release(&chip->chip); list_del(&chip->chip_list); } } diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index 5796468db653450494876bcf6c5da772f7f6bd3e..d19d07931c2b830fafdac6b510a987959760263e 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ b/drivers/mtd/nand/lpc32xx_mlc.c @@ -805,7 +805,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) if (!res) return res; - nand_release(mtd); + nand_release(nand_chip); err_exit4: free_irq(host->irq, host); @@ -829,9 
+829,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) static int lpc32xx_nand_remove(struct platform_device *pdev) { struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); - struct mtd_info *mtd = nand_to_mtd(&host->nand_chip); - nand_release(mtd); + nand_release(&host->nand_chip); free_irq(host->irq, host); if (use_dma) dma_release_channel(host->dma_chan); diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c index b61f28a1554d394acd282a865469ab246a0bef63..1be4aaa3d6e075e467f8d0bf8d861dcc7ce75b15 100644 --- a/drivers/mtd/nand/lpc32xx_slc.c +++ b/drivers/mtd/nand/lpc32xx_slc.c @@ -935,7 +935,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) if (!res) return res; - nand_release(mtd); + nand_release(chip); err_exit3: dma_release_channel(host->dma_chan); @@ -954,9 +954,8 @@ static int lpc32xx_nand_remove(struct platform_device *pdev) { uint32_t tmp; struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); - struct mtd_info *mtd = nand_to_mtd(&host->nand_chip); - nand_release(mtd); + nand_release(&host->nand_chip); dma_release_channel(host->dma_chan); /* Force CE high */ diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c index b6b97cc9fba6966ec2efaa2dd40e6859ca4e2a51..b8a93b47a290171b7eaabb020334418b5aaec042 100644 --- a/drivers/mtd/nand/mpc5121_nfc.c +++ b/drivers/mtd/nand/mpc5121_nfc.c @@ -829,7 +829,7 @@ static int mpc5121_nfc_remove(struct platform_device *op) struct device *dev = &op->dev; struct mtd_info *mtd = dev_get_drvdata(dev); - nand_release(mtd); + nand_release(mtd_to_nand(mtd)); mpc5121_nfc_free(dev, mtd); return 0; diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c index e17f838b9b8141555594f9326efd91858baf1eea..ff314ce104e58234fd658a684a26dcd5e40aaa47 100644 --- a/drivers/mtd/nand/mtk_nand.c +++ b/drivers/mtd/nand/mtk_nand.c @@ -1357,7 +1357,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0); if (ret) { dev_err(dev, "mtd parse partition error\n"); - nand_release(mtd); + nand_cleanup(nand); return ret; } @@ -1514,7 +1514,7 @@ static int mtk_nfc_remove(struct platform_device *pdev) while (!list_empty(&nfc->chips)) { chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip, node); - nand_release(nand_to_mtd(&chip->nand)); + nand_release(&chip->nand); list_del(&chip->node); } diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index fcb575d55b89b7fd83eeacbe29de4bec09d66e5b..808d85bde9f222c564d096faac9ba4105ccc8609 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -1834,7 +1834,7 @@ static int mxcnd_remove(struct platform_device *pdev) { struct mxc_nand_host *host = platform_get_drvdata(pdev); - nand_release(nand_to_mtd(&host->nand)); + nand_release(&host->nand); if (host->clk_act) clk_disable_unprepare(host->clk); diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index d410de3318542521bf283641457f0bae6e5f5775..e953eca67608aba13e9369096123b197ac6d2640 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -5046,12 +5046,12 @@ EXPORT_SYMBOL_GPL(nand_cleanup); /** * nand_release - [NAND Interface] Unregister the MTD device and free resources * held by the NAND device - * @mtd: MTD device structure + * @chip: NAND chip object */ -void nand_release(struct mtd_info *mtd) +void nand_release(struct nand_chip *chip) { - mtd_device_unregister(mtd); - nand_cleanup(mtd_to_nand(mtd)); + 
mtd_device_unregister(nand_to_mtd(chip)); + nand_cleanup(chip); } EXPORT_SYMBOL_GPL(nand_release); diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 44322a363ba549dd7120138fb2b0f977a09260f5..dbb0e47f519755f1a5f4697233dedac4e179662e 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -2356,7 +2356,7 @@ static int __init ns_init_module(void) err_exit: free_nandsim(nand); - nand_release(nsmtd); + nand_release(chip); for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i) kfree(nand->partitions[i].name); error: @@ -2378,7 +2378,7 @@ static void __exit ns_cleanup_module(void) int i; free_nandsim(ns); /* Free nandsim private resources */ - nand_release(nsmtd); /* Unregister driver */ + nand_release(chip); /* Unregister driver */ for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) kfree(ns->partitions[i].name); kfree(mtd_to_nand(nsmtd)); /* Free other structures */ diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c index d8a806894937660dce9defd2fdf35c5a3c654126..eb84328d9bded9892a1bb0c4d29e39585748635a 100644 --- a/drivers/mtd/nand/ndfc.c +++ b/drivers/mtd/nand/ndfc.c @@ -258,7 +258,7 @@ static int ndfc_remove(struct platform_device *ofdev) struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); struct mtd_info *mtd = nand_to_mtd(&ndfc->chip); - nand_release(mtd); + nand_release(&ndfc->chip); kfree(mtd->name); return 0; diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c index 7bb4d2ea93421a4d880b77bdce8fad99052fc53f..a79f88c6d0102a14057d6d8dc603750dce42b439 100644 --- a/drivers/mtd/nand/nuc900_nand.c +++ b/drivers/mtd/nand/nuc900_nand.c @@ -284,7 +284,7 @@ static int nuc900_nand_remove(struct platform_device *pdev) { struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); - nand_release(nand_to_mtd(&nuc900_nand->chip)); + nand_release(&nuc900_nand->chip); clk_disable(nuc900_nand->clk); return 0; diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 9f98f74ff221aff93431e6408c899d2c20e7f58a..110c0726c665e77c3fabcfb8de482a88fb8885dc 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -2306,7 +2306,7 @@ static int omap_nand_remove(struct platform_device *pdev) } if (info->dma) dma_release_channel(info->dma); - nand_release(mtd); + nand_release(nand_chip); return 0; } diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index 5a5aa1f07d07477cb9a5e0b88d9309a772e9282e..df07f9b4cf03af694feee92b03568d2c0884cbc7 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c @@ -186,7 +186,7 @@ static int __init orion_nand_probe(struct platform_device *pdev) mtd->name = "orion_nand"; ret = mtd_device_register(mtd, board->parts, board->nr_parts); if (ret) { - nand_release(mtd); + nand_cleanup(nc); goto no_dev; } @@ -201,9 +201,8 @@ static int orion_nand_remove(struct platform_device *pdev) { struct orion_nand_info *info = platform_get_drvdata(pdev); struct nand_chip *chip = &info->chip; - struct mtd_info *mtd = nand_to_mtd(chip); - nand_release(mtd); + nand_release(chip); clk_disable_unprepare(info->clk); diff --git a/drivers/mtd/nand/oxnas_nand.c b/drivers/mtd/nand/oxnas_nand.c index d649d5944826ef9005a495720e1c0a5966147da5..08b6ae364f2b766c15eaaf775459836cf7635dc6 100644 --- a/drivers/mtd/nand/oxnas_nand.c +++ b/drivers/mtd/nand/oxnas_nand.c @@ -36,6 +36,7 @@ struct oxnas_nand_ctrl { void __iomem *io_base; struct clk *clk; struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS]; + unsigned int nchips; }; static uint8_t oxnas_nand_read_byte(struct mtd_info 
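/*
 * Illustrative sketch only: after the nand_base.c change above,
 * nand_release() takes the nand_chip rather than the mtd_info and performs
 * mtd_device_unregister() followed by nand_cleanup(). A typical driver
 * remove() path therefore now looks like the conversions in this series;
 * example_nand_host and its clk member are hypothetical.
 */
#include <linux/clk.h>
#include <linux/mtd/rawnand.h>	/* linux/mtd/nand.h on older trees */
#include <linux/platform_device.h>

struct example_nand_host {
	struct nand_chip chip;
	struct clk *clk;
};

static int example_nand_remove(struct platform_device *pdev)
{
	struct example_nand_host *host = platform_get_drvdata(pdev);

	/* Unregisters the MTD device and frees all NAND resources. */
	nand_release(&host->chip);
	clk_disable_unprepare(host->clk);
	return 0;
}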
*mtd) @@ -86,9 +87,9 @@ static int oxnas_nand_probe(struct platform_device *pdev) struct nand_chip *chip; struct mtd_info *mtd; struct resource *res; - int nchips = 0; int count = 0; int err = 0; + int i; /* Allocate memory for the device structure (and zero it) */ oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas), @@ -123,7 +124,7 @@ static int oxnas_nand_probe(struct platform_device *pdev) GFP_KERNEL); if (!chip) { err = -ENOMEM; - goto err_clk_unprepare; + goto err_release_child; } chip->controller = &oxnas->base; @@ -144,20 +145,18 @@ static int oxnas_nand_probe(struct platform_device *pdev) /* Scan to find existence of the device */ err = nand_scan(mtd, 1); if (err) - goto err_clk_unprepare; + goto err_release_child; err = mtd_device_register(mtd, NULL, 0); - if (err) { - nand_release(mtd); - goto err_clk_unprepare; - } + if (err) + goto err_cleanup_nand; - oxnas->chips[nchips] = chip; - ++nchips; + oxnas->chips[oxnas->nchips] = chip; + ++oxnas->nchips; } /* Exit if no chips found */ - if (!nchips) { + if (!oxnas->nchips) { err = -ENODEV; goto err_clk_unprepare; } @@ -166,6 +165,17 @@ static int oxnas_nand_probe(struct platform_device *pdev) return 0; +err_cleanup_nand: + nand_cleanup(chip); +err_release_child: + of_node_put(nand_np); + + for (i = 0; i < oxnas->nchips; i++) { + chip = oxnas->chips[i]; + WARN_ON(mtd_device_unregister(nand_to_mtd(chip))); + nand_cleanup(chip); + } + err_clk_unprepare: clk_disable_unprepare(oxnas->clk); return err; @@ -174,9 +184,13 @@ static int oxnas_nand_probe(struct platform_device *pdev) static int oxnas_nand_remove(struct platform_device *pdev) { struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev); + struct nand_chip *chip; + int i; - if (oxnas->chips[0]) - nand_release(nand_to_mtd(oxnas->chips[0])); + for (i = 0; i < oxnas->nchips; i++) { + chip = oxnas->chips[i]; + nand_release(chip); + } clk_disable_unprepare(oxnas->clk); diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c index a47a7e4bd25aeba4a67ebe6e057278721299d2e5..f0f4ff9609655a9f75817da153b1f08ad671ba49 100644 --- a/drivers/mtd/nand/pasemi_nand.c +++ b/drivers/mtd/nand/pasemi_nand.c @@ -163,7 +163,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev) if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) { dev_err(dev, "Unable to register MTD device\n"); err = -ENODEV; - goto out_lpc; + goto out_cleanup_nand; } dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res, @@ -171,6 +171,8 @@ static int pasemi_nand_probe(struct platform_device *ofdev) return 0; + out_cleanup_nand: + nand_cleanup(chip); out_lpc: release_region(lpcctl, 4); out_ior: @@ -191,7 +193,7 @@ static int pasemi_nand_remove(struct platform_device *ofdev) chip = mtd_to_nand(pasemi_nand_mtd); /* Release resources, unregister device */ - nand_release(pasemi_nand_mtd); + nand_release(chip); release_region(lpcctl, 4); diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c index 925a1323604de5ab0c6c897789f255804d6106a8..8c2d1c5c95691954ab6af035e113470fe240396d 100644 --- a/drivers/mtd/nand/plat_nand.c +++ b/drivers/mtd/nand/plat_nand.c @@ -99,7 +99,7 @@ static int plat_nand_probe(struct platform_device *pdev) if (!err) return err; - nand_release(mtd); + nand_cleanup(&data->chip); out: if (pdata->ctrl.remove) pdata->ctrl.remove(pdev); @@ -114,7 +114,7 @@ static int plat_nand_remove(struct platform_device *pdev) struct plat_nand_data *data = platform_get_drvdata(pdev); struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev); - 
nand_release(nand_to_mtd(&data->chip)); + nand_release(&data->chip); if (pdata->ctrl.remove) pdata->ctrl.remove(pdev); diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 125b744c9c285c91eddc9f0c22684cf31dcfa552..df62f99979f3caa2428c37ed11f55e58021b9ecf 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -1915,7 +1915,7 @@ static int pxa3xx_nand_remove(struct platform_device *pdev) clk_disable_unprepare(info->clk); for (cs = 0; cs < pdata->num_cs; cs++) - nand_release(nand_to_mtd(&info->host[cs]->chip)); + nand_release(&info->host[cs]->chip); return 0; } diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c index 09d5f7df60237dee9bf77c3606f63c9ed488c61d..65d1be2c30497259a6dfac4b10a9fdbb769b20c5 100644 --- a/drivers/mtd/nand/qcom_nandc.c +++ b/drivers/mtd/nand/qcom_nandc.c @@ -2760,7 +2760,7 @@ static int qcom_nandc_remove(struct platform_device *pdev) struct qcom_nand_host *host; list_for_each_entry(host, &nandc->host_list, node) - nand_release(nand_to_mtd(&host->chip)); + nand_release(&host->chip); qcom_nandc_unalloc(nandc); diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c index fc9287af4614039d305362b579112bc9d90cab0a..2cfa54941395228f41b66a01d6bd96b9c0ae1b32 100644 --- a/drivers/mtd/nand/r852.c +++ b/drivers/mtd/nand/r852.c @@ -656,7 +656,7 @@ static int r852_register_nand_device(struct r852_device *dev) dev->card_registred = 1; return 0; error3: - nand_release(mtd); + nand_release(dev->chip); error1: /* Force card redetect */ dev->card_detected = 0; @@ -675,7 +675,7 @@ static void r852_unregister_nand_device(struct r852_device *dev) return; device_remove_file(&mtd->dev, &dev_attr_media_type); - nand_release(mtd); + nand_release(dev->chip); r852_engine_disable(dev); dev->card_registred = 0; } diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index 4c383eeec6f6f74aeb2c5894dd221b6ee41a1b73..f60de68bfabcc5712831ee5cdedd1ea881bf28fb 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c @@ -784,7 +784,7 @@ static int s3c24xx_nand_remove(struct platform_device *pdev) for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) { pr_debug("releasing mtd %d (%p)\n", mtdno, ptr); - nand_release(nand_to_mtd(&ptr->chip)); + nand_release(&ptr->chip); } } diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index 43db80e5d994f7936a96ea7290ce01748a757507..f2ed03ee30355ca3be255ed8ebc540ca7154b505 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c @@ -1231,7 +1231,7 @@ static int flctl_remove(struct platform_device *pdev) struct sh_flctl *flctl = platform_get_drvdata(pdev); flctl_release_dma(flctl); - nand_release(nand_to_mtd(&flctl->chip)); + nand_release(&flctl->chip); pm_runtime_disable(&pdev->dev); return 0; diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c index f59c455d9f5170f7749bd7b7a35f4ed7ca1325a6..c245d66609c1f4be52cce34fde53ce2d64b74e32 100644 --- a/drivers/mtd/nand/sharpsl.c +++ b/drivers/mtd/nand/sharpsl.c @@ -192,7 +192,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev) return 0; err_add: - nand_release(mtd); + nand_cleanup(this); err_scan: iounmap(sharpsl->io); @@ -210,7 +210,7 @@ static int sharpsl_nand_remove(struct platform_device *pdev) struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev); /* Release resources, unregister device */ - nand_release(nand_to_mtd(&sharpsl->chip)); + nand_release(&sharpsl->chip); iounmap(sharpsl->io); diff --git a/drivers/mtd/nand/socrates_nand.c 
b/drivers/mtd/nand/socrates_nand.c index 575997d0ef8a0d4a64f9fd65d0d299a9105fc029..8d4f0cd7197d301d4e8dd04e7f29698abb9e8035 100644 --- a/drivers/mtd/nand/socrates_nand.c +++ b/drivers/mtd/nand/socrates_nand.c @@ -195,7 +195,7 @@ static int socrates_nand_probe(struct platform_device *ofdev) if (!res) return res; - nand_release(mtd); + nand_cleanup(nand_chip); out: iounmap(host->io_base); @@ -208,9 +208,8 @@ static int socrates_nand_probe(struct platform_device *ofdev) static int socrates_nand_remove(struct platform_device *ofdev) { struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev); - struct mtd_info *mtd = nand_to_mtd(&host->nand_chip); - nand_release(mtd); + nand_release(&host->nand_chip); iounmap(host->io_base); diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index 8e523148239769107f5fdef8674de422d0c8fb2a..d6e31e8a7b668e8e1caf13867fe249c9bf1974f2 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -2125,7 +2125,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(dev, "failed to register mtd device: %d\n", ret); - nand_release(mtd); + nand_release(nand); return ret; } @@ -2164,7 +2164,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) while (!list_empty(&nfc->chips)) { chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip, node); - nand_release(nand_to_mtd(&chip->nand)); + nand_release(&chip->nand); sunxi_nand_ecc_cleanup(&chip->nand.ecc); list_del(&chip->node); } diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c index ce366816a7efb47fee4fb947ea3a2507718dffc2..1ab16a90ea296362c5e82d947dab9f6190f3b0d2 100644 --- a/drivers/mtd/nand/tango_nand.c +++ b/drivers/mtd/nand/tango_nand.c @@ -619,7 +619,7 @@ static int tango_nand_remove(struct platform_device *pdev) for (cs = 0; cs < MAX_CS; ++cs) { if (nfc->chips[cs]) - nand_release(nand_to_mtd(&nfc->chips[cs]->nand_chip)); + nand_release(&nfc->chips[cs]->nand_chip); } return 0; diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c index 84dbf32332e1292844676a5f9c2b85f6fdf1562e..51f12b9f90ba1b54820cfedebc7b5656683c7d2b 100644 --- a/drivers/mtd/nand/tmio_nand.c +++ b/drivers/mtd/nand/tmio_nand.c @@ -448,7 +448,7 @@ static int tmio_probe(struct platform_device *dev) if (!retval) return retval; - nand_release(mtd); + nand_cleanup(nand_chip); err_irq: tmio_hw_stop(dev, tmio); @@ -459,7 +459,7 @@ static int tmio_remove(struct platform_device *dev) { struct tmio_nand *tmio = platform_get_drvdata(dev); - nand_release(nand_to_mtd(&tmio->chip)); + nand_release(&tmio->chip); tmio_hw_stop(dev, tmio); return 0; } diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c index b567d212fe7ded16f935386326ed06a8793b2a77..236181b2985ad9f105f08c49371499638c599294 100644 --- a/drivers/mtd/nand/txx9ndfmc.c +++ b/drivers/mtd/nand/txx9ndfmc.c @@ -390,7 +390,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev) chip = mtd_to_nand(mtd); txx9_priv = nand_get_controller_data(chip); - nand_release(mtd); + nand_release(chip); kfree(txx9_priv->mtdname); kfree(txx9_priv); } diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c index e2583a539b413e3299691df313d737c33565b445..688393526b5aac00ab327fa1769f984d3f74b8c0 100644 --- a/drivers/mtd/nand/vf610_nfc.c +++ b/drivers/mtd/nand/vf610_nfc.c @@ -794,7 +794,7 @@ static int vf610_nfc_remove(struct platform_device *pdev) struct mtd_info *mtd = 
platform_get_drvdata(pdev); struct vf610_nfc *nfc = mtd_to_nfc(mtd); - nand_release(mtd); + nand_release(mtd_to_nand(mtd)); clk_disable_unprepare(nfc->clk); return 0; } diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c index 9926b4e3d69d014d70bb9f48941e9bc3037cce77..176a94fa31d7fa5c80453b377df347f34ad344d5 100644 --- a/drivers/mtd/nand/xway_nand.c +++ b/drivers/mtd/nand/xway_nand.c @@ -211,7 +211,7 @@ static int xway_nand_probe(struct platform_device *pdev) err = mtd_device_register(mtd, NULL, 0); if (err) - nand_release(mtd); + nand_cleanup(&data->chip); return err; } @@ -223,7 +223,7 @@ static int xway_nand_remove(struct platform_device *pdev) { struct xway_nand_data *data = platform_get_drvdata(pdev); - nand_release(nand_to_mtd(&data->chip)); + nand_release(&data->chip); return 0; } diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index 7bc96294ae4d530ef129dff3b6c67978f6e227da..b108e1f04bf62e64dd586851fa04a1680d37841d 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c @@ -405,9 +405,6 @@ static void *eraseblk_count_seq_start(struct seq_file *s, loff_t *pos) { struct ubi_device *ubi = s->private; - if (*pos == 0) - return SEQ_START_TOKEN; - if (*pos < ubi->peb_count) return pos; @@ -421,8 +418,6 @@ static void *eraseblk_count_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct ubi_device *ubi = s->private; - if (v == SEQ_START_TOKEN) - return pos; (*pos)++; if (*pos < ubi->peb_count) @@ -444,11 +439,8 @@ static int eraseblk_count_seq_show(struct seq_file *s, void *iter) int err; /* If this is the start, print a header */ - if (iter == SEQ_START_TOKEN) { - seq_puts(s, - "physical_block_number\terase_count\tblock_status\tread_status\n"); - return 0; - } + if (*block_number == 0) + seq_puts(s, "physical_block_number\terase_count\n"); err = ubi_io_is_bad(ubi, *block_number); if (err) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index fef599eb822b6d1958e7d759ba2717c1ca6051ab..1f867e275408eca2a52d0e71e47406e7e1f37a10 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4773,15 +4773,19 @@ int bond_create(struct net *net, const char *name) bond_dev->rtnl_link_ops = &bond_link_ops; res = register_netdevice(bond_dev); + if (res < 0) { + free_netdev(bond_dev); + rtnl_unlock(); + + return res; + } netif_carrier_off(bond_dev); bond_work_init_all(bond); rtnl_unlock(); - if (res < 0) - free_netdev(bond_dev); - return res; + return 0; } static int __net_init bond_net_init(struct net *net) diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 77babf1417a7e96c727319ea387bfee5331f4fad..0e95eeb822ea5f374fb3f33eff85910385a3eea3 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -451,11 +451,10 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev, return err; err = register_netdevice(bond_dev); - - netif_carrier_off(bond_dev); if (!err) { struct bonding *bond = netdev_priv(bond_dev); + netif_carrier_off(bond_dev); bond_work_init_all(bond); } diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 641a532b67cbc1b7e8101c171b195830c0501ded..3f756fa2f603b2fb081cbad8ef4481f2cc54ede0 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c @@ -153,8 +153,10 @@ int bond_sysfs_slave_add(struct slave *slave) err = kobject_init_and_add(&slave->kobj, &slave_ktype, &(slave->dev->dev.kobj), "bonding_slave"); - if 
(err) + if (err) { + kobject_put(&slave->kobj); return err; + } for (a = slave_attrs; *a; ++a) { err = sysfs_create_file(&slave->kobj, &((*a)->attr)); diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index daed57d3d2097d5f8ad026e5b902811fff3db813..2b994bbf85ca3169139df81c24b54b76093de6db 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -791,7 +791,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, if (!urb) return -ENOMEM; - buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); + buf = kzalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); if (!buf) { usb_free_urb(urb); return -ENOMEM; @@ -1459,7 +1459,7 @@ static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv) struct kvaser_msg *msg; int rc; - msg = kmalloc(sizeof(*msg), GFP_KERNEL); + msg = kzalloc(sizeof(*msg), GFP_KERNEL); if (!msg) return -ENOMEM; @@ -1592,7 +1592,7 @@ static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv) struct kvaser_msg *msg; int rc; - msg = kmalloc(sizeof(*msg), GFP_KERNEL); + msg = kzalloc(sizeof(*msg), GFP_KERNEL); if (!msg) return -ENOMEM; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index b40ebc27e1ece2f25c86459f6343a43e11973cdb..9f355673f630ca03f8f2d57d704ea491d67d0f7f 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1175,6 +1175,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) */ set_bit(0, priv->cfp.used); + /* Balance of_node_put() done by of_find_node_by_name() */ + of_node_get(dn); ports = of_find_node_by_name(dn, "ports"); if (ports) { bcm_sf2_identify_ports(priv, ports); diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index d55051abf4edc683a7959120f29a0db0bc9c9d86..a5a83d86bb0f51955625754710382ffdd0792718 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -357,6 +357,7 @@ static void __exit dsa_loop_exit(void) } module_exit(dsa_loop_exit); +MODULE_SOFTDEP("pre: dsa_loop_bdinfo"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Florian Fainelli"); MODULE_DESCRIPTION("DSA loopback driver"); diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 3143de45baaad58bb08c0aad86711351a7cd39d7..c458b81ba63affc70af5aaa01a07afd895c4d2c8 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -433,7 +433,7 @@ static void emac_timeout(struct net_device *dev) /* Hardware start transmission. * Send a packet to media from the upper layer. */ -static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct emac_board_info *db = netdev_priv(dev); unsigned long channel; @@ -441,7 +441,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) channel = db->tx_fifo_stat & 3; if (channel == 3) - return 1; + return NETDEV_TX_BUSY; channel = (channel == 1 ? 
1 : 0); diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index dc9149a32f412a0be72cc22afdf3edf1b01ef694..bb1710ff910a64369038e6205f287f09d6cb14cc 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -2131,6 +2131,9 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, rss->hash_key; int rc; + if (unlikely(!func)) + return -EINVAL; + rc = ena_com_get_feature_ex(ena_dev, &get_resp, ENA_ADMIN_RSS_HASH_FUNCTION, rss->hash_key_dma_addr, @@ -2143,8 +2146,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, if (rss->hash_func) rss->hash_func--; - if (func) - *func = rss->hash_func; + *func = rss->hash_func; if (key) memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index eac740c476ceb5c38d4bd9af09d3da62d4e0121e..a8b462e1beba6fb5b57495ac2f6e149a9beeff73 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -1187,7 +1187,7 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea) int i; unsigned short data; - for (i = 0; i < 6; i++) + for (i = 0; i < 3; i++) { reset_and_select_srom(dev); data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits); diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/Makefile b/drivers/net/ethernet/aquantia/atlantic-fwd/Makefile index c574e59cb11dc40a1d72e86ab2bf813af5636cfc..c16bb87e6e17d69ac71603c59203f2cef345a41b 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/Makefile +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/Makefile @@ -18,7 +18,9 @@ atlantic-fwd-objs := atl_fw.o \ atl_ethtool.o \ atl_trace.o \ atl_compat.o \ - atl_hwmon.o + atl_hwmon.o \ + atl_ptp.o \ + atl_hw_ptp.o atlantic-fwd-$(CONFIG_ATLFWD_FWD) += atl_fwd.o atlantic-fwd-$(CONFIG_ATLFWD_FWD_NETLINK) += atl_fwdnl.o \ diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl2_fw.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl2_fw.c index 7aee0e1e63dabceb29d82ad49a9589aa8d4ef1f3..a5f0ca04bb09fe7e75e1ace202f54d2978c98dae 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl2_fw.c +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl2_fw.c @@ -153,10 +153,56 @@ static inline int atl2_shared_buffer_finish_ack(struct atl_hw *hw) return err; } +static int atl2_fw_get_filter_caps(struct atl_hw *hw) +{ + struct atl_nic *nic = container_of(hw, struct atl_nic, hw); + struct filter_caps_s filter_caps; + u32 tag_top; + int err; + + err = atl2_shared_buffer_read_safe(hw, filter_caps, &filter_caps); + if (err) + return err; + + hw->art_base_index = filter_caps.rslv_tbl_base_index * 8; + hw->art_available = filter_caps.rslv_tbl_count * 8; + if (hw->art_available == 0) + hw->art_available = 128; + nic->rxf_flex.available = 1; + nic->rxf_flex.base_index = filter_caps.flexible_filter_mask >> 1; + nic->rxf_mac.base_index = filter_caps.l2_filters_base_index; + nic->rxf_mac.available = filter_caps.l2_filter_count; + nic->rxf_etype.base_index = filter_caps.ethertype_filter_base_index; + nic->rxf_etype.available = filter_caps.ethertype_filter_count; + nic->rxf_etype.tag_top = + (nic->rxf_etype.available >= ATL2_RPF_ETYPE_TAGS) ? + (ATL2_RPF_ETYPE_TAGS) : (ATL2_RPF_ETYPE_TAGS >> 1); + nic->rxf_vlan.base_index = filter_caps.vlan_filter_base_index; + /* 0 - no tag, 1 - reserved for vlan-filter-offload filters */ + tag_top = (filter_caps.vlan_filter_count == ATL_VLAN_FLT_NUM) ? 
+ (ATL_VLAN_FLT_NUM - 2) : + (ATL_VLAN_FLT_NUM / 2 - 2); + nic->rxf_vlan.available = min_t(u32, filter_caps.vlan_filter_count - 2, + tag_top); + nic->rxf_ntuple.l3_v4_base_index = filter_caps.l3_ip4_filter_base_index; + nic->rxf_ntuple.l3_v4_available = min_t(u32, + filter_caps.l3_ip4_filter_count, + ATL_NTUPLE_FLT_NUM - 1); + nic->rxf_ntuple.l3_v6_base_index = filter_caps.l3_ip6_filter_base_index; + nic->rxf_ntuple.l3_v6_available = filter_caps.l3_ip6_filter_count; + nic->rxf_ntuple.l4_base_index = filter_caps.l4_filter_base_index; + nic->rxf_ntuple.l4_available = min_t(u32, filter_caps.l4_filter_count, + ATL_NTUPLE_FLT_NUM - 1); + + return 0; +} + static int __atl2_fw_wait_init(struct atl_hw *hw) { + struct request_policy_s request_policy; struct link_control_s link_control; uint32_t mtu; + int err; BUILD_BUG_ON_MSG(sizeof(struct link_options_s) != 0x4, "linkOptions invalid size"); @@ -168,7 +214,7 @@ static int __atl2_fw_wait_init(struct atl_hw *hw) "pauseQuanta invalid size"); BUILD_BUG_ON_MSG(sizeof(struct cable_diag_control_s) != 0x4, "cableDiagControl invalid size"); - BUILD_BUG_ON_MSG(sizeof(struct statistics_s) != 0x6C, + BUILD_BUG_ON_MSG(sizeof(struct statistics_s) != 0x70, "statistics_s invalid size"); @@ -225,9 +271,16 @@ static int __atl2_fw_wait_init(struct atl_hw *hw) "stats invalid offset"); BUILD_BUG_ON_MSG(offsetof(struct fw_interface_out, filter_caps) != 0x774, "filter_caps invalid offset"); + BUILD_BUG_ON_MSG(offsetof(struct fw_interface_out, + management_status) != 0x78c, + "management_status invalid offset"); BUILD_BUG_ON_MSG(offsetof(struct fw_interface_out, trace) != 0x800, "trace invalid offset"); + err = atl2_fw_get_filter_caps(hw); + if (err) + return err; + atl2_shared_buffer_get(hw, link_control, link_control); link_control.mode = ATL2_HOST_MODE_ACTIVE; atl2_shared_buffer_write(hw, link_control, link_control); @@ -236,6 +289,30 @@ static int __atl2_fw_wait_init(struct atl_hw *hw) mtu = ATL_MAX_MTU + ETH_FCS_LEN + ETH_HLEN; atl2_shared_buffer_write(hw, mtu, mtu); + atl2_shared_buffer_get(hw, request_policy, request_policy); + request_policy.bcast.accept = 1; + request_policy.bcast.queue_or_tc = 1; + request_policy.bcast.rx_queue_tc_index = 0; + request_policy.mcast.accept = 1; + request_policy.mcast.queue_or_tc = 1; + request_policy.mcast.rx_queue_tc_index = 0; + request_policy.promisc.queue_or_tc = 1; + request_policy.promisc.rx_queue_tc_index = 0; + atl2_shared_buffer_write(hw, request_policy, request_policy); + + return atl2_shared_buffer_finish_ack(hw); +} + +int atl2_fw_set_filter_policy(struct atl_hw *hw, bool promisc, bool allmulti) +{ + struct request_policy_s request_policy; + + atl2_shared_buffer_get(hw, request_policy, request_policy); + + request_policy.promisc.all = promisc; + request_policy.mcast.promisc = allmulti; + + atl2_shared_buffer_write(hw, request_policy, request_policy); return atl2_shared_buffer_finish_ack(hw); } @@ -498,9 +575,11 @@ static struct atl_link_type *atl2_fw_check_link(struct atl_hw *hw) { struct atl_link_type *link; struct atl_link_state *lstate = &hw->link_state; - struct phy_health_monitor_s phy_health_monitor = {0}; + struct phy_health_monitor_s phy_health_monitor; int ret = 0; + memset(&phy_health_monitor, 0, sizeof(phy_health_monitor)); + atl_lock_fw(hw); __atl2_fw_update_link_status(hw); @@ -700,10 +779,12 @@ static int atl2_fw_enable_wol(struct atl_hw *hw, unsigned int wol_mode) static int atl2_fw_update_thermal(struct atl_hw *hw) { bool enable = !!(hw->thermal.flags & atl_thermal_monitor); - struct 
phy_health_monitor_s phy_health_monitor = {0}; + struct phy_health_monitor_s phy_health_monitor; struct thermal_shutdown_s thermal_shutdown; int ret = 0; + memset(&phy_health_monitor, 0, sizeof(phy_health_monitor)); + atl_lock_fw(hw); atl2_shared_buffer_get(hw, thermal_shutdown, thermal_shutdown); diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl2_fw.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl2_fw.h index 292347194fbb1b111da23ab96fa8c9914c625213..5db61f37253f22471f5f91f7bd8a779cad845a04 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl2_fw.h +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl2_fw.h @@ -80,7 +80,6 @@ struct thermal_shutdown_s { struct mac_address_s { uint8_t mac_address[6]; - uint16_t rsvd; }; struct sleep_proxy_s { @@ -188,7 +187,7 @@ struct sleep_proxy_s { uint32_t rr__offset; } mdns; /* WARN: where this gap actually is not known */ - uint32_t reserveFWGAP:16; + uint32_t reserve_fw_gap:16; }; struct ptp_s { @@ -472,26 +471,64 @@ struct statistics_s { uint32_t tx_good_frames; uint32_t rx_good_frames; - uint32_t reserveFWGAP; + uint32_t reserve_fw_gap; } msm; uint32_t main_loop_cycles; + uint32_t reserve_fw_gap; }; -struct filter_caps_s { - uint8_t unicast_filters_count; - uint8_t multicast_filters_count; - uint8_t ethertype_filters_count; - uint8_t vlan_filters_count; - uint8_t l3_filters_count; - uint8_t l4_filters_count; - uint8_t l4_flex_filters_count; - uint8_t flexible_filters_count; +struct filter_caps_s { + uint8_t l2_filters_base_index:6; + uint8_t flexible_filter_mask:2; + uint8_t l2_filter_count; + uint8_t ethertype_filter_base_index; + uint8_t ethertype_filter_count; + + uint8_t vlan_filter_base_index; + uint8_t vlan_filter_count; + uint8_t l3_ip4_filter_base_index:4; + uint8_t l3_ip4_filter_count:4; + uint8_t l3_ip6_filter_base_index:4; + uint8_t l3_ip6_filter_count:4; + + uint8_t l4_filter_base_index:4; + uint8_t l4_filter_count:4; + uint8_t l4_flex_filter_base_index:4; + uint8_t l4_flex_filter_count:4; + uint8_t rslv_tbl_base_index; + uint8_t rslv_tbl_count; +}; + +struct request_policy_s { + struct { + uint8_t all:1; + uint8_t rsvd:1; + uint8_t rx_queue_tc_index:5; + uint8_t queue_or_tc:1; + } promisc; + + struct { + uint8_t accept:1; + uint8_t rsvd:1; + uint8_t rx_queue_tc_index:5; + uint8_t queue_or_tc:1; + } bcast; + + struct { + uint8_t accept:1; + uint8_t promisc:1; + uint8_t rx_queue_tc_index:5; + uint8_t queue_or_tc:1; + } mcast; + + uint8_t rsvd:8; }; struct fw_interface_in { uint32_t mtu; uint32_t rsvd1:32; struct mac_address_s mac_address; + uint16_t rsvd; struct link_control_s link_control; uint32_t rsvd2:32; struct link_options_s link_options; @@ -504,6 +541,8 @@ struct fw_interface_in { struct cable_diag_control_s cable_diag_control; uint32_t rsvd6:32; struct data_buffer_status_s data_buffer_status; + uint32_t rsvd7:32; + struct request_policy_s request_policy; }; struct transaction_counter_s { @@ -511,6 +550,22 @@ struct transaction_counter_s { uint32_t transaction_cnt_b:16; }; +struct management_status_s { + struct mac_address_s mac_address; + uint16_t vlan; + + struct{ + uint32_t enable : 1; + uint32_t rsvd:31; + } flags; + + uint32_t rsvd1:32; + uint32_t rsvd2:32; + uint32_t rsvd3:32; + uint32_t rsvd4:32; + uint32_t rsvd5:32; +}; + struct fw_interface_out { struct transaction_counter_s transaction_id; struct version_s version; @@ -536,11 +591,11 @@ struct fw_interface_out { uint32_t rsvd11:32; struct statistics_s stats; uint32_t rsvd12:32; - uint32_t rsvd13:32; struct filter_caps_s filter_caps; - uint32_t 
rsvd14:32; struct device_caps_s device_caps; - uint32_t reserve[30]; + uint32_t rsvd13:32; + struct management_status_s management_status; + uint32_t reserve[21]; struct trace_s trace; }; @@ -629,6 +684,7 @@ enum ATL2_WAKE_REASON { int atl2_fw_init(struct atl_hw *hw); int atl2_get_fw_version(struct atl_hw *hw, u32 *fw_version); +int atl2_fw_set_filter_policy(struct atl_hw *hw, bool promisc, bool allmulti); #endif diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_common.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_common.h index 7476d093ffddf1ea199dbbcf5859849fdc85d3a6..4198f46244ad8a6972cec9b288462205d0889cb9 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_common.h +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_common.h @@ -20,7 +20,7 @@ #include #include -#define ATL_VERSION "1.1.7" +#define ATL_VERSION "1.1.10" struct atl_nic; @@ -34,13 +34,15 @@ struct atl_nic; #include "atl_fwd.h" +struct atl_ptp; + enum { ATL_RXF_VLAN_BASE = 0, ATL_RXF_VLAN_MAX = ATL_VLAN_FLT_NUM, ATL_RXF_ETYPE_BASE = ATL_RXF_VLAN_BASE + ATL_RXF_VLAN_MAX, ATL_RXF_ETYPE_MAX = ATL_ETYPE_FLT_NUM, - /* + 1 is for backward compatibility */ - ATL_RXF_NTUPLE_BASE = ATL_RXF_ETYPE_BASE + ATL_RXF_ETYPE_MAX + 1, + ATL2_RPF_ETYPE_TAGS = 7, + ATL_RXF_NTUPLE_BASE = ATL_RXF_ETYPE_BASE + ATL_RXF_ETYPE_MAX, ATL_RXF_NTUPLE_MAX = ATL_NTUPLE_FLT_NUM, ATL_RXF_FLEX_BASE = ATL_RXF_NTUPLE_BASE + ATL_RXF_NTUPLE_MAX, ATL_RXF_FLEX_MAX = 1, @@ -132,12 +134,20 @@ struct atl_rxf_ntuple { __be16 dst_port[ATL_RXF_NTUPLE_MAX]; __be16 src_port[ATL_RXF_NTUPLE_MAX]; - struct atl2_rxf_l3 l3[ATL_RXF_NTUPLE_MAX]; + struct atl2_rxf_l3 l3v4[ATL_RXF_NTUPLE_MAX]; + struct atl2_rxf_l3 l3v6[ATL_RXF_NTUPLE_MAX]; struct atl2_rxf_l4 l4[ATL_RXF_NTUPLE_MAX]; s8 l3_idx[ATL_RXF_NTUPLE_MAX]; + bool is_ipv6[ATL_RXF_NTUPLE_MAX]; s8 l4_idx[ATL_RXF_NTUPLE_MAX]; uint32_t cmd[ATL_RXF_NTUPLE_MAX]; int count; + int l3_v4_base_index; + int l3_v4_available; + int l3_v6_base_index; + int l3_v6_available; + int l4_base_index; + int l4_available; }; enum atl_vlan_cmd { @@ -157,6 +167,8 @@ struct atl_rxf_vlan { unsigned long map[ATL_VID_MAP_LEN]; int vlans_active; int promisc_count; + int base_index; + int available; }; enum atl_etype_cmd { @@ -168,9 +180,24 @@ enum atl_etype_cmd { ATL_ETYPE_VAL_MASK = BIT(16) - 1, }; +struct atl2_tag_policy { + u16 action; + u16 usage; +}; + struct atl_rxf_etype { uint32_t cmd[ATL_RXF_ETYPE_MAX]; int count; + struct atl2_tag_policy tags_policy[ATL_RXF_ETYPE_MAX]; + int tag[ATL_RXF_ETYPE_MAX]; + int base_index; + int available; + int tag_top; +}; + +struct atl_rxf_mac { + int base_index; + int available; }; enum atl_flex_cmd { @@ -184,6 +211,8 @@ enum atl_flex_cmd { struct atl_rxf_flex { uint32_t cmd[ATL_RXF_FLEX_MAX]; int count; + int base_index; + int available; }; struct atl_queue_vec; @@ -192,7 +221,11 @@ struct atl_queue_vec; #define ATL_FWD_RING_BASE ATL_MAX_QUEUES /* Use TC 1 for offload * engine rings */ #define ATL_NUM_MSI_VECS 32 -#define ATL_NUM_NON_RING_IRQS 1 +enum { + ATL_IRQ_LINK = 0, + ATL_IRQ_PTP, + ATL_NUM_NON_RING_IRQS, +}; #define ATL_RXF_RING_ANY 32 @@ -252,7 +285,11 @@ struct atl_nic { struct atl_rxf_ntuple rxf_ntuple; struct atl_rxf_vlan rxf_vlan; struct atl_rxf_etype rxf_etype; + struct atl_rxf_mac rxf_mac; struct atl_rxf_flex rxf_flex; + + /* PTP support */ + struct atl_ptp *ptp; }; /* Flags only modified with RTNL lock held */ diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.h index 
d267a04c4e741b322a2eb2f56b1e01f613181757..b17e7f42e7c3e8349f8d2b00de54612822ebcbc5 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.h +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.h @@ -17,6 +17,12 @@ #include #include +#include +#include + +#ifndef IS_REACHABLE +#define IS_REACHABLE defined +#endif /* If the kernel is not RHEL / CentOS, then the 2 identifiers below will be * undefined. Define them this way to simplify the checks below. @@ -107,7 +113,7 @@ static inline int skb_xmit_more(struct sk_buff *skb) { return 0; } -#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 2) static inline int skb_xmit_more(struct sk_buff *skb) { return netdev_xmit_more(); diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_desc.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_desc.h index 6f871388bc479bd891ecf3d98d1946e85462076d..4dd382d9c2224ebd3265f703c765f78114423757 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_desc.h +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_desc.h @@ -15,7 +15,7 @@ #include #if defined(__LITTLE_ENDIAN_BITFIELD) -struct atl_tx_ctx { +struct __packed atl_tx_ctx { unsigned long long :40; //0 unsigned tun_len:8; //40 unsigned out_len:16; //48 @@ -27,9 +27,9 @@ struct atl_tx_ctx { unsigned l3_len:9; //95 unsigned l4_len:8; //104 unsigned mss_len:16; //112 -} __attribute__((packed)); +}; -struct atl_tx_desc { +struct __packed atl_tx_desc { unsigned long long daddr:64; //0 unsigned type:3; //64 unsigned :1; //67 @@ -41,7 +41,7 @@ struct atl_tx_desc { unsigned ct_idx:1; //108 unsigned ct_en:1; //109 unsigned pay_len:18; //110 -} __attribute__((packed)); +}; #define ATL_DATA_PER_TXD 16384 // despite ->len being 16 bits @@ -65,7 +65,7 @@ enum atl_tx_ctx_cmd { ctx_cmd_tcp = 4, // TCP / ~UDP }; -struct atl_rx_desc { +struct __packed atl_rx_desc { uint64_t daddr; //0 union { struct { @@ -74,9 +74,9 @@ struct atl_rx_desc { }; uint64_t haddr; }; -} __attribute__((packed)); +}; -struct atl_rx_desc_wb { +struct __packed atl_rx_desc_wb { unsigned rss_type:4; //0 unsigned pkt_type:8; //4 unsigned rdm_err:1; //12 @@ -93,7 +93,16 @@ struct atl_rx_desc_wb { unsigned pkt_len:16; //80 unsigned next_desp:16; //96 unsigned vlan_tag:16; //112 -} __attribute__((packed)); +}; + +struct __packed atl_rx_desc_hwts_wb { + u32 sec_hw; + u32 ns; + u32 dd:1; + u32 rsvd:1; + u32 sec_lw0:30; + u32 sec_lw1; +}; enum atl_rx_stat { atl_rx_stat_mac_err = 1, @@ -135,13 +144,16 @@ enum atl_rx_pkt_type { #error XXX Fix bigendian bitfields #endif // defined(__LITTLE_ENDIAN_BITFIELD) -union atl_desc{ +union __packed atl_desc { struct atl_rx_desc rx; - struct atl_rx_desc_wb wb; + union { + struct atl_rx_desc_wb wb; + struct atl_rx_desc_hwts_wb hwts_wb; + }; struct atl_tx_ctx ctx; struct atl_tx_desc tx; uint8_t raw[16]; -}__attribute__((packed)); +}; #endif diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ethtool.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ethtool.c index dc77e41f0e53816b0e99f355aab3f0c96ef019c4..d845bd8f9dc75bfbdc202b045146d63f3cad1e61 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ethtool.c @@ -11,12 +11,15 @@ #include #include +#include +#include "atl_ethtool.h" #include "atl_common.h" #include "atl_mdio.h" #include "atl_ring.h" #include "atl_fwdnl.h" #include "atl_macsec.h" +#include "atl_ptp.h" static uint32_t atl_ethtool_get_link(struct net_device 
*ndev) { @@ -108,9 +111,11 @@ struct atl_ethtool_compat { static int atl_ethtool_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) { - struct atl_ethtool_compat cmd_compat = {0}; struct atl_nic *nic = netdev_priv(ndev); struct atl_link_state *lstate = &nic->hw.link_state; + struct atl_ethtool_compat cmd_compat; + + memset(&cmd_compat, 0, sizeof(cmd_compat)); atl_ethtool_get_common(cmd, &cmd_compat, lstate, true); cmd->supported = cmd_compat.link_modes.supported; @@ -1186,6 +1191,38 @@ static int atl_set_coalesce(struct net_device *ndev, return 0; } +static int atl_get_ts_info(struct net_device *ndev, + struct ethtool_ts_info *info) +{ + struct atl_nic *nic = netdev_priv(ndev); + struct ptp_clock *ptp_clock; + + ethtool_op_get_ts_info(ndev, info); + + if (!nic->ptp) + return 0; + + info->so_timestamping |= + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + + ptp_clock = atl_ptp_get_ptp_clock(nic); + if (ptp_clock) + info->phc_index = ptp_clock_index(ptp_clock); + + return 0; +} + struct atl_rxf_flt_desc { int base; int max; @@ -1281,6 +1318,16 @@ static int atl_rxf_get_ntuple(const struct atl_rxf_flt_desc *desc, if (!(cmd & ATL_RXF_EN)) return -EINVAL; +#ifdef ATL_HAVE_IPV6_NTUPLE + if (cmd & ATL_NTC_V6) { + fsp->flow_type = IPV6_USER_FLOW; + } else +#endif + { + fsp->flow_type = IPV4_USER_FLOW; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + } + if (cmd & ATL_NTC_PROTO) { switch (cmd & ATL_NTC_L4_MASK) { case ATL_NTC_L4_TCP: @@ -1298,18 +1345,21 @@ static int atl_rxf_get_ntuple(const struct atl_rxf_flt_desc *desc, SCTP_V6_FLOW : SCTP_V4_FLOW; break; - default: - return -EINVAL; - } - } else { + case ATL_NTC_L4_ICMP: #ifdef ATL_HAVE_IPV6_NTUPLE - if (cmd & ATL_NTC_V6) { - fsp->flow_type = IPV6_USER_FLOW; - } else + if (cmd & ATL_NTC_V6) { + fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6; + fsp->m_u.usr_ip6_spec.l4_proto = 0xff; + } else #endif - { - fsp->flow_type = IPV4_USER_FLOW; - fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + { + fsp->h_u.usr_ip4_spec.proto = IPPROTO_ICMP; + fsp->m_u.usr_ip4_spec.proto = 0xff; + } + break; + + default: + return -EINVAL; } } @@ -1527,13 +1577,13 @@ static uint32_t atl_rxf_find_vid(struct atl_nic *nic, uint16_t vid, bool try_repl) { struct atl_rxf_vlan *vlan = &nic->rxf_vlan; - int idx, free = ATL_RXF_VLAN_MAX, repl = ATL_RXF_VLAN_MAX; + int idx, free = vlan->available, repl = vlan->available; - for (idx = 0; idx < ATL_RXF_VLAN_MAX; idx++) { + for (idx = 0; idx < vlan->available; idx++) { uint32_t cmd = vlan->cmd[idx]; if (!(cmd & ATL_RXF_EN)) { - if (free == ATL_RXF_VLAN_MAX) { + if (free == vlan->available) { free = idx; if (vid == 0xffff) break; @@ -1544,7 +1594,7 @@ static uint32_t atl_rxf_find_vid(struct atl_nic *nic, uint16_t vid, if ((cmd & ATL_VLAN_VID_MASK) == vid) return idx | ATL_VIDX_FOUND; - if (try_repl && repl == ATL_RXF_VLAN_MAX && + if (try_repl && repl == vlan->available && (cmd & ATL_RXF_ACT_TOHOST) && !(cmd & ATL_VLAN_RXQ)) { @@ -1555,10 +1605,10 @@ static uint32_t atl_rxf_find_vid(struct atl_nic *nic, uint16_t vid, } } - if (free != ATL_RXF_VLAN_MAX) + if (free != vlan->available) return free | ATL_VIDX_FREE; - if (try_repl && repl != ATL_RXF_VLAN_MAX) + if (try_repl && repl != vlan->available) return repl | 
ATL_VIDX_REPL; return ATL_VIDX_NONE; @@ -1575,7 +1625,7 @@ static int atl_rxf_dup_vid(struct atl_rxf_vlan *vlan, int idx, uint16_t vid) { int i; - for (i = 0; i < ATL_RXF_VLAN_MAX; i++) { + for (i = 0; i < vlan->available; i++) { if (i == idx) continue; @@ -1600,6 +1650,9 @@ static int atl_rxf_set_vlan(const struct atl_rxf_flt_desc *desc, int dup; idx = atl_rxf_idx(desc, fsp); + if (idx >= vlan->available) + return -ENOSPC; + dup = atl_rxf_dup_vid(vlan, idx, vid); if (dup >= 0) { atl_nic_err("Can't add duplicate VLAN filter @%d (existing @%d)\n", @@ -1656,6 +1709,37 @@ static int atl_rxf_set_vlan(const struct atl_rxf_flt_desc *desc, return !present; } +/** Find tag with the same action or new free tag + * top - top inclusive tag value + * action - action for ActionResolverTable + */ +static inline int atl2_filter_tag_get(struct atl2_tag_policy *tags, + int top, u16 action) +{ + int i; + + for (i = 1; i <= top; i++) + if ((tags[i].usage > 0) && (tags[i].action == action)) { + tags[i].usage++; + return i; + } + + for (i = 1; i <= top; i++) + if (tags[i].usage == 0) { + tags[i].usage = 1; + tags[i].action = action; + return i; + } + + return -1; +} + +static inline void atl2_filter_tag_put(struct atl2_tag_policy *tags, int tag) +{ + if (tags[tag].usage > 0) + tags[tag].usage--; +} + static int atl_rxf_set_etype(const struct atl_rxf_flt_desc *desc, struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp) { @@ -1680,12 +1764,35 @@ static int atl_rxf_set_etype(const struct atl_rxf_flt_desc *desc, if (fsp->m_u.ether_spec.h_proto != 0xffff) return -EINVAL; + if (idx >= etype->available) + return -ENOSPC; + cmd |= ntohs(fsp->h_u.ether_spec.h_proto); ret = atl_rxf_set_ring(desc, nic, fsp, &cmd); if (ret) return ret; + if (nic->hw.new_rpf) { + uint16_t action; + + if (!(cmd & ATL_RXF_ACT_TOHOST)) { + action = ATL2_ACTION_DROP; + } else if (!(cmd & ATL_ETYPE_RXQ)) { + action = ATL2_ACTION_ASSIGN_TC(0); + } else { + int queue = (cmd >> ATL_ETYPE_RXQ_SHIFT) & ATL_RXF_RXQ_MSK; + + action = ATL2_ACTION_ASSIGN_QUEUE(queue); + } + + etype->tag[idx] = atl2_filter_tag_get(etype->tags_policy, + etype->tag_top, + action); + if (etype->tag[idx] < 0) + return -ENOSPC; + } + etype->cmd[idx] = cmd; return !present; @@ -1735,9 +1842,13 @@ static int atl2_rxf_l4_is_equal(struct atl2_rxf_l4 *f1, struct atl2_rxf_l4 *f2) return true; } -static void atl2_rpf_l3_cmd_set(struct atl_hw *hw, u32 val, u32 idx) +static void atl2_rxf_write_l3_cmd(struct atl_hw *hw, int l3_idx, bool is_ipv6, + uint32_t cmd) { - atl_write_mask_bits(hw, ATL2_RPF_L3_FLT(idx), 0xFF7FFFFF, val); + uint32_t mask = is_ipv6 ? 
0xFF7F0000 : 0x0000FFFF; + uint32_t value = (atl_read(hw, ATL2_RPF_L3_FLT(l3_idx)) & ~mask) | cmd; + + atl_write(hw, ATL2_RPF_L3_FLT(l3_idx), value); } static void atl2_rxf_l3_put(struct atl_hw *hw, struct atl2_rxf_l3 *l3, int idx) @@ -1746,8 +1857,8 @@ static void atl2_rxf_l3_put(struct atl_hw *hw, struct atl2_rxf_l3 *l3, int idx) l3->usage--; if (!l3->usage) { + atl2_rxf_write_l3_cmd(hw, idx, l3->cmd & ATL2_NTC_L3_IPV6_EN, 0); l3->cmd = 0; - atl2_rpf_l3_cmd_set(hw, l3->cmd, idx); } } @@ -1785,116 +1896,186 @@ static void atl2_rxf_l4_get(struct atl2_rxf_l4 *l4, int idx, l4->dst_port = _l4->dst_port; } -static void atl2_rxf_set_ntuple(struct atl_nic *nic, - struct atl_rxf_ntuple *ntuple, - int idx) +static void atl2_rxf_configure_l3l4(struct atl_rxf_ntuple *ntuple, int idx, + struct atl2_rxf_l3 *l3, + struct atl2_rxf_l4 *l4) { - struct atl2_rxf_l3 l3; - struct atl2_rxf_l4 l4; - s8 l3_idx = -1; - s8 l4_idx = -1; - int i; - - memset(&l3, 0, sizeof(l3)); - memset(&l4, 0, sizeof(l4)); - if (ntuple->cmd[idx] & ATL_NTC_PROTO) - l3.cmd |= ntuple->cmd[idx] & ATL_NTC_V6 ? + l3->cmd |= ntuple->cmd[idx] & ATL_NTC_V6 ? ATL2_NTC_L3_IPV6_PROTO | ATL2_NTC_L3_IPV6_EN : ATL2_NTC_L3_IPV4_PROTO | ATL2_NTC_L3_IPV4_EN; switch (ntuple->cmd[idx] & ATL_NTC_L4_MASK) { case ATL_NTC_L4_TCP: - l3.cmd |= ntuple->cmd[idx] & ATL_NTC_V6 ? + l3->cmd |= ntuple->cmd[idx] & ATL_NTC_V6 ? IPPROTO_TCP << ATL2_NTC_L3_IPV6_PROTO_SHIFT : IPPROTO_TCP << ATL2_NTC_L3_IPV4_PROTO_SHIFT; break; case ATL_NTC_L4_UDP: - l3.cmd |= ntuple->cmd[idx] & ATL_NTC_V6 ? + l3->cmd |= ntuple->cmd[idx] & ATL_NTC_V6 ? IPPROTO_UDP << ATL2_NTC_L3_IPV6_PROTO_SHIFT : IPPROTO_UDP << ATL2_NTC_L3_IPV4_PROTO_SHIFT; break; case ATL_NTC_L4_SCTP: - l3.cmd |= ntuple->cmd[idx] & ATL_NTC_V6 ? + l3->cmd |= ntuple->cmd[idx] & ATL_NTC_V6 ? IPPROTO_SCTP << ATL2_NTC_L3_IPV6_PROTO_SHIFT : IPPROTO_SCTP << ATL2_NTC_L3_IPV4_PROTO_SHIFT; break; + + case ATL_NTC_L4_ICMP: +#ifdef ATL_HAVE_IPV6_NTUPLE + l3->cmd |= ntuple->cmd[idx] & ATL_NTC_V6 ? 
+ IPPROTO_ICMPV6 << ATL2_NTC_L3_IPV6_PROTO_SHIFT : + IPPROTO_ICMP << ATL2_NTC_L3_IPV4_PROTO_SHIFT; +#else + l3->cmd |= IPPROTO_ICMP << ATL2_NTC_L3_IPV4_PROTO_SHIFT; +#endif + break; + } if (ntuple->cmd[idx] & ATL_NTC_SA) { if (ntuple->cmd[idx] & ATL_NTC_V6) { - l3.cmd |= ATL2_NTC_L3_IPV6_SA | ATL2_NTC_L3_IPV6_EN; - memcpy(l3.src_ip6, ntuple->src_ip6[idx], 16); + l3->cmd |= ATL2_NTC_L3_IPV6_SA | ATL2_NTC_L3_IPV6_EN; + memcpy(l3->src_ip6, ntuple->src_ip6[idx], 16); } else { - l3.cmd |= ATL2_NTC_L3_IPV4_SA | ATL2_NTC_L3_IPV4_EN; - l3.src_ip4 = ntuple->src_ip4[idx]; + l3->cmd |= ATL2_NTC_L3_IPV4_SA | ATL2_NTC_L3_IPV4_EN; + l3->src_ip4 = ntuple->src_ip4[idx]; } } if (ntuple->cmd[idx] & ATL_NTC_DA) { if (ntuple->cmd[idx] & ATL_NTC_V6) { - l3.cmd |= ATL2_NTC_L3_IPV6_DA | ATL2_NTC_L3_IPV6_EN; - memcpy(l3.dst_ip6, ntuple->dst_ip6[idx], 16); + l3->cmd |= ATL2_NTC_L3_IPV6_DA | ATL2_NTC_L3_IPV6_EN; + memcpy(l3->dst_ip6, ntuple->dst_ip6[idx], 16); } else { - l3.cmd |= ATL2_NTC_L3_IPV4_DA | ATL2_NTC_L3_IPV4_EN; - l3.dst_ip4 = ntuple->dst_ip4[idx]; + l3->cmd |= ATL2_NTC_L3_IPV4_DA | ATL2_NTC_L3_IPV4_EN; + l3->dst_ip4 = ntuple->dst_ip4[idx]; } } if (ntuple->cmd[idx] & ATL_NTC_SP) { - l4.cmd |= ATL2_NTC_L4_SP | ATL2_NTC_L4_EN; - l4.src_port = ntuple->src_port[idx]; + l4->cmd |= ATL2_NTC_L4_SP | ATL2_NTC_L4_EN; + l4->src_port = ntuple->src_port[idx]; } if (ntuple->cmd[idx] & ATL_NTC_DP) { - l4.cmd |= ATL2_NTC_L4_DP | ATL2_NTC_L4_EN; - l4.dst_port = ntuple->dst_port[idx]; + l4->cmd |= ATL2_NTC_L4_DP | ATL2_NTC_L4_EN; + l4->dst_port = ntuple->dst_port[idx]; } +} - /* find L3 and L4 filters */ - if (l3.cmd & (ATL2_NTC_L3_IPV4_EN | ATL2_NTC_L3_IPV6_EN)) { - for (i = 0; i < ATL_RXF_NTUPLE_MAX; i++) { - if (atl2_rxf_l3_is_equal(&ntuple->l3[i], &l3)) { +static int atl2_rxf_fl3l4_find_l3(struct atl_rxf_ntuple *ntuple, + struct atl2_rxf_l3 *l3) +{ + struct atl2_rxf_l3 *nl3 = (l3->cmd & ATL2_NTC_L3_IPV4_EN) ? + ntuple->l3v4 : ntuple->l3v6; + int first = (l3->cmd & ATL2_NTC_L3_IPV4_EN) ? + ntuple->l3_v4_base_index : + ntuple->l3_v6_base_index; + int last = first + (l3->cmd & ATL2_NTC_L3_IPV4_EN) ? 
+ ntuple->l3_v4_available : + ntuple->l3_v6_available; + int l3_idx = -1; + int i; + + for (i = first; i < last; i++) { + if (atl2_rxf_l3_is_equal(&nl3[i], l3)) { + l3_idx = i; + break; + } + } + if (l3_idx < 0) + for (i = first; i < last; i++) + if ((nl3[i].cmd & (ATL2_NTC_L3_IPV4_EN | + ATL2_NTC_L3_IPV6_EN)) == 0) { l3_idx = i; break; } + if (l3_idx < 0) + return -ENOSPC; + + return l3_idx; +} + +static int atl2_rxf_fl3l4_find_l4(struct atl_rxf_ntuple *ntuple, + struct atl2_rxf_l4 *l4) +{ + int l4_idx = -1; + int i; + + for (i = ntuple->l4_base_index; i < ntuple->l4_available; i++) { + if (atl2_rxf_l4_is_equal(&ntuple->l4[i], l4)) + l4_idx = i; + } + if (l4_idx >= 0) + return l4_idx; + + for (i = ntuple->l4_base_index; i < ntuple->l4_available; i++) { + if ((ntuple->l4[i].cmd & ATL2_NTC_L4_EN) == 0) { + l4_idx = i; + break; } + } + if (l4_idx < 0) + return -ENOSPC; + return l4_idx; +} + +static int atl2_rxf_set_ntuple(struct atl_nic *nic, + struct atl_rxf_ntuple *ntuple, + int idx) +{ + struct atl2_rxf_l3 l3; + struct atl2_rxf_l4 l4; + struct atl2_rxf_l3 *l3_filters; + s8 l3_idx = -1; + s8 l4_idx = -1; + + memset(&l3, 0, sizeof(l3)); + memset(&l4, 0, sizeof(l4)); + atl2_rxf_configure_l3l4(ntuple, idx, &l3, &l4); + + /* find L3 and L4 filters */ + if (l3.cmd & (ATL2_NTC_L3_IPV4_EN | ATL2_NTC_L3_IPV6_EN)) { + l3_idx = atl2_rxf_fl3l4_find_l3(ntuple, &l3); if (l3_idx < 0) - for (i = 0; i < ATL_RXF_NTUPLE_MAX; i++) - if ((ntuple->l3[i].cmd & - (ATL2_NTC_L3_IPV4_EN | - ATL2_NTC_L3_IPV6_EN)) == 0) { - l3_idx = i; - break; - } - WARN(l3_idx < 0, "L3 filter table inconsistent"); + return l3_idx; + } + + if (l4.cmd & ATL2_NTC_L4_EN) { + l4_idx = atl2_rxf_fl3l4_find_l4(ntuple, &l4); + if (l4_idx < 0) + return l4_idx; + + if (ntuple->l4_idx[idx] != l4_idx) + atl2_rxf_l4_get(&ntuple->l4[l4_idx], l4_idx, &l4); + } + + if (l3.cmd & (ATL2_NTC_L3_IPV4_EN | ATL2_NTC_L3_IPV6_EN)) { + if (l3.cmd & ATL2_NTC_L3_IPV4_EN) + l3_filters = ntuple->l3v4; + else + l3_filters = ntuple->l3v6; + if (ntuple->l3_idx[idx] != l3_idx) - atl2_rxf_l3_get(&ntuple->l3[l3_idx], l3_idx, &l3); + atl2_rxf_l3_get(&l3_filters[l3_idx], l3_idx, &l3); } - if (ntuple->l3_idx[idx] != -1) + /* release old filter */ + if (ntuple->l3_idx[idx] != -1) { + if (ntuple->is_ipv6[idx]) + l3_filters = ntuple->l3v6; + else + l3_filters = ntuple->l3v4; + if (!(atl2_rxf_l3_is_equal(&l3, - &ntuple->l3[ntuple->l3_idx[idx]]))) { + &l3_filters[ntuple->l3_idx[idx]]))) { atl2_rxf_l3_put(&nic->hw, - &ntuple->l3[ntuple->l3_idx[idx]], + &l3_filters[ntuple->l3_idx[idx]], ntuple->l3_idx[idx]); } - ntuple->l3_idx[idx] = l3_idx; - - if (l4.cmd & ATL2_NTC_L4_EN) { - for (i = 0; i < ATL_RXF_NTUPLE_MAX; i++) { - if (atl2_rxf_l4_is_equal(&ntuple->l4[i], &l4)) - l4_idx = i; - } - if (l4_idx < 0) - for (i = 0; i < ATL_RXF_NTUPLE_MAX; i++) - if ((ntuple->l4[i].cmd & ATL2_NTC_L4_EN) == 0) { - l4_idx = i; - break; - } - WARN(l4_idx < 0, "L4 filter table inconsistent"); - if (ntuple->l4_idx[idx] != l4_idx) - atl2_rxf_l4_get(&ntuple->l4[l4_idx], l4_idx, &l4); } + ntuple->l3_idx[idx] = l3_idx; if (ntuple->l4_idx[idx] != -1) if (!(atl2_rxf_l4_is_equal(&l4, @@ -1904,6 +2085,10 @@ static void atl2_rxf_set_ntuple(struct atl_nic *nic, ntuple->l4_idx[idx]); } ntuple->l4_idx[idx] = l4_idx; + + ntuple->is_ipv6[idx] = (l3.cmd & ATL2_NTC_L3_IPV4_EN) ? 
false : true; + + return 0; } static int atl_rxf_set_ntuple(const struct atl_rxf_flt_desc *desc, @@ -1934,11 +2119,18 @@ static int atl_rxf_set_ntuple(const struct atl_rxf_flt_desc *desc, case IPV6_USER_FLOW: if (fsp->m_u.usr_ip6_spec.l4_4_bytes != 0 || - fsp->m_u.usr_ip6_spec.tclass != 0 || - fsp->m_u.usr_ip6_spec.l4_proto != 0) { + fsp->m_u.usr_ip6_spec.tclass != 0) { + atl_nic_err("Unsupported match field\n"); + return -EINVAL; + } + + if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_ICMPV6) { + cmd |= ATL_NTC_L4_ICMP | ATL_NTC_PROTO; + } else if (fsp->m_u.usr_ip6_spec.l4_proto != 0) { atl_nic_err("Unsupported match field\n"); return -EINVAL; } + cmd |= ATL_NTC_V6; break; #endif @@ -1955,9 +2147,15 @@ static int atl_rxf_set_ntuple(const struct atl_rxf_flt_desc *desc, case IPV4_USER_FLOW: if (fsp->m_u.usr_ip4_spec.l4_4_bytes != 0 || - fsp->m_u.usr_ip4_spec.tos != 0 || - fsp->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 || - fsp->h_u.usr_ip4_spec.proto != 0) { + fsp->m_u.usr_ip4_spec.tos != 0 || + fsp->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) { + atl_nic_err("Unsupported match field\n"); + return -EINVAL; + } + + if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_ICMP) { + cmd |= ATL_NTC_L4_ICMP | ATL_NTC_PROTO; + } else if (fsp->m_u.usr_ip4_spec.proto != 0) { atl_nic_err("Unsupported match field\n"); return -EINVAL; } @@ -1988,12 +2186,7 @@ static int atl_rxf_set_ntuple(const struct atl_rxf_flt_desc *desc, if (cmd & ATL_NTC_V6) { int i; - if (nic->hw.new_rpf) { - if (idx > 5) { - atl_nic_err("IPv6 filters allowed in the first 6 locations\n"); - return -EINVAL; - } - } else { + if (!nic->hw.new_rpf) { if (idx & 3) { atl_nic_err("IPv6 filters only supported in locations 8 and 12\n"); return -EINVAL; @@ -2083,8 +2276,11 @@ static int atl_rxf_set_ntuple(const struct atl_rxf_flt_desc *desc, ntuple->cmd[idx] = cmd; - if (nic->hw.new_rpf) - atl2_rxf_set_ntuple(nic, ntuple, idx); + if (nic->hw.new_rpf) { + ret = atl2_rxf_set_ntuple(nic, ntuple, idx); + if (ret < 0) + return ret; + } return !present; } @@ -2109,18 +2305,19 @@ static int atl_rxf_set_flex(const struct atl_rxf_flt_desc *desc, static void atl_rxf_update_vlan(struct atl_nic *nic, int idx) { - uint32_t cmd = nic->rxf_vlan.cmd[idx]; + struct atl_rxf_vlan *vlan = &nic->rxf_vlan; + uint32_t cmd = vlan->cmd[idx]; struct atl_hw *hw = &nic->hw; u16 action; - atl_write(&nic->hw, ATL_RX_VLAN_FLT(idx), cmd); + atl_write(&nic->hw, ATL_RX_VLAN_FLT(vlan->base_index + idx), cmd); if (!nic->hw.new_rpf) return; if (!(cmd & ATL_RXF_EN)) { atl2_act_rslvr_table_set(hw, - ATL2_RPF_VLAN_USER_INDEX + idx, + hw->art_base_index + ATL2_RPF_VLAN_USER_INDEX + idx, 0, 0, ATL2_ACTION_DISABLE); @@ -2130,7 +2327,7 @@ static void atl_rxf_update_vlan(struct atl_nic *nic, int idx) if (!(cmd & ATL_RXF_ACT_TOHOST)) { action = ATL2_ACTION_DROP; } else if (!(cmd & ATL_VLAN_RXQ)) { - atl2_rpf_vlan_flr_tag_set(hw, 1, idx); + atl2_rpf_vlan_flr_tag_set(hw, 1, vlan->base_index + idx); return; } else { int queue = (cmd >> ATL_VLAN_RXQ_SHIFT) & ATL_RXF_RXQ_MSK; @@ -2138,49 +2335,45 @@ static void atl_rxf_update_vlan(struct atl_nic *nic, int idx) action = ATL2_ACTION_ASSIGN_QUEUE(queue); } - atl2_rpf_vlan_flr_tag_set(hw, idx + 2, idx); + atl2_rpf_vlan_flr_tag_set(hw, idx + 2, vlan->base_index + idx); atl2_act_rslvr_table_set(hw, - ATL2_RPF_VLAN_USER_INDEX + idx, + hw->art_base_index + ATL2_RPF_VLAN_USER_INDEX + idx, (idx + 2) << ATL2_RPF_TAG_VLAN_OFFSET, ATL2_RPF_TAG_VLAN_MASK, action); - } static void atl_rxf_update_etype(struct atl_nic *nic, int idx) { - uint32_t cmd = 
nic->rxf_etype.cmd[idx]; + struct atl_rxf_etype *etype = &nic->rxf_etype; + uint32_t cmd = etype->cmd[idx]; struct atl_hw *hw = &nic->hw; - u16 action; + u16 action, index; - atl_write(&nic->hw, ATL_RX_ETYPE_FLT(idx), cmd); + atl_write(&nic->hw, ATL_RX_ETYPE_FLT(etype->base_index + idx), cmd); if (!nic->hw.new_rpf) return; if (!(cmd & ATL_RXF_EN)) { + atl2_filter_tag_put(etype->tags_policy, + etype->tag[idx]); + index = hw->art_base_index + + ATL2_RPF_ET_PCP_USER_INDEX + idx; atl2_act_rslvr_table_set(hw, - ATL2_RPF_ET_PCP_USER_INDEX + idx, + index, 0, 0, ATL2_ACTION_DISABLE); return; } - if (!(cmd & ATL_RXF_ACT_TOHOST)) { - action = ATL2_ACTION_DROP; - } else if (!(cmd & ATL_ETYPE_RXQ)) { - action = ATL2_ACTION_ASSIGN_TC(0); - } else { - int queue = (cmd >> ATL_ETYPE_RXQ_SHIFT) & ATL_RXF_RXQ_MSK; - - action = ATL2_ACTION_ASSIGN_QUEUE(queue); - } - - atl2_rpf_etht_flr_tag_set(hw, idx + 1, idx); + atl2_rpf_etht_flr_tag_set(hw, etype->tag[idx], etype->base_index + idx); + action = etype->tags_policy[etype->tag[idx]].action; + index = hw->art_base_index + ATL2_RPF_ET_PCP_USER_INDEX + idx; atl2_act_rslvr_table_set(hw, - ATL2_RPF_ET_PCP_USER_INDEX + idx, - (idx + 1) << ATL2_RPF_TAG_ET_OFFSET, + index, + etype->tag[idx] << ATL2_RPF_TAG_ET_OFFSET, ATL2_RPF_TAG_ET_MASK, action); } @@ -2189,15 +2382,18 @@ static void atl2_update_ntuple_flt(struct atl_nic *nic, int idx) { struct atl_hw *hw = &nic->hw; struct atl_rxf_ntuple *ntuple = &nic->rxf_ntuple; + uint32_t tag = 0, mask = 0, action, cmd; + struct atl2_rxf_l3 *l3_filters; struct atl2_rxf_l3 *l3 = NULL; struct atl2_rxf_l4 *l4 = NULL; s8 l3_idx = ntuple->l3_idx[idx]; s8 l4_idx = ntuple->l4_idx[idx]; - uint32_t tag = 0, mask = 0, action, cmd; + bool is_ipv6 = ntuple->is_ipv6[idx]; + l3_filters = ntuple->is_ipv6[idx] ? 
ntuple->l3v6 : ntuple->l3v4; if (!(ntuple->cmd[idx] & ATL_NTC_EN)) { if (l3_idx > -1) - atl2_rxf_l3_put(hw, &ntuple->l3[l3_idx], l3_idx); + atl2_rxf_l3_put(hw, &l3_filters[l3_idx], l3_idx); if (l4_idx > -1) atl2_rxf_l4_put(hw, &ntuple->l4[l4_idx], l4_idx); @@ -2205,7 +2401,7 @@ static void atl2_update_ntuple_flt(struct atl_nic *nic, int idx) ntuple->l4_idx[idx] = -1; ntuple->l3_idx[idx] = -1; atl2_act_rslvr_table_set(hw, - ATL2_RPF_L3L4_USER_INDEX + idx, + hw->art_base_index + ATL2_RPF_L3L4_USER_INDEX + idx, 0, 0, ATL2_ACTION_DISABLE); @@ -2213,7 +2409,7 @@ static void atl2_update_ntuple_flt(struct atl_nic *nic, int idx) return; } if (l3_idx > -1) { - l3 = &ntuple->l3[l3_idx]; + l3 = &l3_filters[l3_idx]; cmd = l3->cmd; if (l3->cmd & ATL2_NTC_L3_IPV4_EN) { tag |= (l3_idx + 1) << ATL2_RPF_TAG_L3_V4_OFFSET; @@ -2238,7 +2434,7 @@ static void atl2_update_ntuple_flt(struct atl_nic *nic, int idx) return; } - atl2_rpf_l3_cmd_set(hw, cmd, l3_idx); + atl2_rxf_write_l3_cmd(hw, l3_idx, is_ipv6, cmd); } if (l4_idx > -1) { @@ -2269,10 +2465,10 @@ static void atl2_update_ntuple_flt(struct atl_nic *nic, int idx) } atl2_act_rslvr_table_set(hw, - ATL2_RPF_L3L4_USER_INDEX + idx, - tag, - mask, - action); + hw->art_base_index + ATL2_RPF_L3L4_USER_INDEX + idx, + tag, + mask, + action); } void atl_update_ntuple_flt(struct atl_nic *nic, int idx) @@ -2282,11 +2478,12 @@ void atl_update_ntuple_flt(struct atl_nic *nic, int idx) uint32_t cmd = ntuple->cmd[idx]; int i; + if (nic->hw.new_rpf) + return atl2_update_ntuple_flt(nic, idx); + if (!(cmd & ATL_NTC_EN)) { atl_write(hw, ATL_NTUPLE_CTRL(idx), cmd); - if (nic->hw.new_rpf) - atl2_update_ntuple_flt(nic, idx); return; } @@ -2327,19 +2524,19 @@ void atl_update_ntuple_flt(struct atl_nic *nic, int idx) cmd |= 1 << ATL_NTC_ACT_SHIFT; atl_write(hw, ATL_NTUPLE_CTRL(idx), cmd); - - if (nic->hw.new_rpf) - atl2_update_ntuple_flt(nic, idx); } static void atl_rxf_update_flex(struct atl_nic *nic, int idx) { - atl_write(&nic->hw, ATL_RX_FLEX_FLT_CTRL(idx), nic->rxf_flex.cmd[idx]); + atl_write(&nic->hw, + ATL_RX_FLEX_FLT_CTRL(nic->rxf_flex.base_index + idx), + nic->rxf_flex.cmd[idx]); if (nic->hw.new_rpf) { uint32_t action; - atl2_rpf_flex_flr_tag_set(&nic->hw, idx + 1, idx); + atl2_rpf_flex_flr_tag_set(&nic->hw, idx + 1, + nic->rxf_flex.base_index + idx); if (!(nic->rxf_flex.cmd[idx] & ATL_FLEX_EN)) { action = ATL2_ACTION_DISABLE; @@ -2353,14 +2550,14 @@ static void atl_rxf_update_flex(struct atl_nic *nic, int idx) action = ATL2_ACTION_ASSIGN_QUEUE(queue); } atl2_act_rslvr_table_set(&nic->hw, - ATL2_RPF_FLEX_USER_INDEX + idx, + nic->hw.art_base_index + ATL2_RPF_FLEX_USER_INDEX + idx, (idx + 1) << ATL2_RPF_TAG_FLEX_OFFSET, ATL2_RPF_TAG_FLEX_MASK, action); } } -static const struct atl_rxf_flt_desc atl_rxf_descs[] = { +static struct atl_rxf_flt_desc atl_rxf_descs[] = { { .base = ATL_RXF_VLAN_BASE, .max = ATL_RXF_VLAN_MAX, @@ -2408,6 +2605,46 @@ static const struct atl_rxf_flt_desc atl_rxf_descs[] = { }, }; +s8 atl_reserve_filter(enum atl_rxf_type type) +{ + switch (type) { + case ATL_RXF_ETYPE: + WARN_ONCE(atl_rxf_descs[type].max != ATL_RXF_ETYPE_MAX, + "already reserved"); + atl_rxf_descs[type].max--; + return atl_rxf_descs[type].max; + case ATL_RXF_NTUPLE: + WARN_ONCE(atl_rxf_descs[type].max != ATL_RXF_NTUPLE_MAX, + "already reserved"); + atl_rxf_descs[type].max--; + return atl_rxf_descs[type].max; + default: + WARN_ONCE(true, "unexpected type"); + break; + } + + return -1; +} + +void atl_release_filter(enum atl_rxf_type type) +{ + switch (type) { + case ATL_RXF_ETYPE: + 
WARN_ONCE(atl_rxf_descs[type].max == ATL_RXF_ETYPE_MAX, + "already released"); + atl_rxf_descs[type].max++; + break; + case ATL_RXF_NTUPLE: + WARN_ONCE(atl_rxf_descs[type].max == ATL_RXF_NTUPLE_MAX, + "already released"); + atl_rxf_descs[type].max++; + break; + default: + WARN_ONCE(true, "unexpected type"); + break; + } +} + static uint32_t *atl_rxf_cmd(const struct atl_rxf_flt_desc *desc, struct atl_nic *nic) { @@ -2483,7 +2720,7 @@ static bool atl_vlan_pull_from_promisc(struct atl_nic *nic, uint32_t idx) return false; memcpy(map, vlan->map, ATL_VID_MAP_LEN * sizeof(*map)); - for (i = 0; i < ATL_RXF_VLAN_MAX; i++) { + for (i = 0; i < vlan->available; i++) { uint32_t cmd = vlan->cmd[i]; if (cmd & ATL_RXF_EN) @@ -2863,6 +3100,7 @@ const struct ethtool_ops atl_ethtool_ops = { .set_priv_flags = atl_set_priv_flags, .get_coalesce = atl_get_coalesce, .set_coalesce = atl_set_coalesce, + .get_ts_info = atl_get_ts_info, .get_wol = atl_get_wol, .set_wol = atl_set_wol, .begin = atl_ethtool_begin, diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ethtool.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ethtool.h new file mode 100644 index 0000000000000000000000000000000000000000..7fc88ea2c0807ab92cdf827563886602bc62b544 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ethtool.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Atlantic Network Driver + * + * Copyright (C) 2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _ATL_ETHTOOL_H_ +#define _ATL_ETHTOOL_H_ + +enum atl_rxf_type { + ATL_RXF_VLAN = 0, + ATL_RXF_ETYPE, + ATL_RXF_NTUPLE, + ATL_RXF_FLEX, +}; + +s8 atl_reserve_filter(enum atl_rxf_type type); +void atl_release_filter(enum atl_rxf_type type); + +#endif diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.c index b04ec5090bd327d4dff7228def256904ff70c112..a7056852677b7152af7f4fb2e814cfdd7757e3b5 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.c +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.c @@ -223,6 +223,7 @@ static int __atl_fw1_get_link_caps(struct atl_hw *hw) /* fw lock must be held */ static int __atl_fw2_get_link_caps(struct atl_hw *hw) { + struct atl_nic *nic = container_of(hw, struct atl_nic, hw); struct atl_mcp *mcp = &hw->mcp; uint32_t fw_stat_addr = mcp->fw_stat_addr; struct atl_link_type *rate; @@ -262,6 +263,21 @@ static int __atl_fw2_get_link_caps(struct atl_hw *hw) hw->link_state.supported = supported; hw->link_state.lp_lowest = fls(supported) - 1; + nic->rxf_flex.base_index = 0; + nic->rxf_flex.available = ATL_FLEX_FLT_NUM; + nic->rxf_mac.base_index = 0; + nic->rxf_mac.available = ATL_UC_FLT_NUM; + nic->rxf_etype.base_index = 0; + nic->rxf_etype.available = ATL_ETYPE_FLT_NUM - 1; /* 1 reserved by FW */ + nic->rxf_vlan.base_index = 0; + nic->rxf_vlan.available = ATL_VLAN_FLT_NUM; + nic->rxf_ntuple.l3_v4_base_index = 0; + nic->rxf_ntuple.l3_v4_available = ATL_NTUPLE_FLT_NUM; + nic->rxf_ntuple.l3_v6_base_index = 0; + nic->rxf_ntuple.l3_v6_available = ATL_NTUPLE_V6_FLT_NUM; + nic->rxf_ntuple.l4_base_index = 0; + nic->rxf_ntuple.l4_available = ATL_NTUPLE_FLT_NUM; + return ret; } @@ -882,6 +898,63 @@ static int atl_fw2_update_thermal(struct atl_hw *hw) return ret; } +static int atl_fw2_send_ptp_request(struct 
atl_hw *hw, + struct ptp_msg_fw_request *msg) +{ + size_t size; + int ret = 0; + + if (!msg) + return -EINVAL; + + size = sizeof(msg->msg_id); + switch (msg->msg_id) { + case ptp_gpio_ctrl_msg: + size += sizeof(msg->gpio_ctrl); + break; + case ptp_adj_freq_msg: + size += sizeof(msg->adj_freq); + break; + case ptp_adj_clock_msg: + size += sizeof(msg->adj_clock); + break; + default: + return -EINVAL; + } + + atl_lock_fw(hw); + + /* Write macsec request to cfg memory */ + ret = atl_write_mcp_mem(hw, 0, msg, (size + 3) & ~3, MCP_AREA_CONFIG); + if (ret) { + atl_dev_err("Failed to upload ptp request: %d\n", ret); + goto err_exit; + } + + /* Toggle statistics bit for FW to update */ + ret = atl_fw2_update_statistics(hw); + +err_exit: + atl_unlock_fw(hw); + return ret; +} + +static void atl_fw3_set_ptp(struct atl_hw *hw, bool on) +{ + u32 all_ptp_features = atl_fw2_ex_caps_phy_ptp_en | atl_fw2_ex_caps_ptp_gpio_en; + u32 ptp_opts; + + atl_lock_fw(hw); + ptp_opts = atl_read(hw, ATL_MCP_SCRATCH(FW3_EXT_RES)); + if (on) + ptp_opts |= all_ptp_features; + else + ptp_opts &= ~all_ptp_features; + + atl_write(hw, ATL_MCP_SCRATCH(FW3_EXT_REQ), ptp_opts); + atl_unlock_fw(hw); +} + static struct atl_fw_ops atl_fw_ops[2] = { [0] = { .__wait_fw_init = __atl_fw1_wait_fw_init, @@ -921,6 +994,8 @@ static struct atl_fw_ops atl_fw_ops[2] = { .__get_hbeat = __atl_fw2_get_hbeat, .get_mac_addr = atl_fw2_get_mac_addr, .update_thermal = atl_fw2_update_thermal, + .send_ptp_req = atl_fw2_send_ptp_request, + .set_ptp = atl_fw3_set_ptp, .deinit = atl_fw1_unsupported, }, }; diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.h index 86500ed29c29a6dbec1af86cc9b09d7f7bd521cd..9115933157ff113dd600f8dda85b0eaaef762f6b 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.h +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.h @@ -95,6 +95,9 @@ enum atl_fw2_opts { }; enum atl_fw2_ex_caps { + atl_define_bit(atl_fw2_ex_caps_phy_ptp_en, 16) + atl_define_bit(atl_fw2_ex_caps_ptp_gpio_en, 20) + atl_define_bit(atl_fw2_ex_caps_phy_ctrl_ts_pin, 22) atl_define_bit(atl_fw2_ex_caps_wol_ex, 23) atl_define_bit(atl_fw2_ex_caps_mac_heartbeat, 25) atl_define_bit(atl_fw2_ex_caps_msm_settings_apply, 26) @@ -108,10 +111,12 @@ enum atl_fw2_wol_ex { enum atl_fw2_stat_offt { atl_fw2_stat_phy_hbeat = 0x4c, atl_fw2_stat_temp = 0x50, + atl_fw2_stat_ptp_offset = 0x64, atl_fw2_stat_lcaps = 0x84, atl_fw2_stat_settings_addr = 0x10c, atl_fw2_stat_settings_len = 0x110, atl_fw2_stat_caps_ex = 0x114, + atl_fw2_stat_gpio_pin = 0x118, }; enum atl_fw2_settings_offt { @@ -172,6 +177,8 @@ struct atl_link_state{ bool autoneg; bool eee; bool eee_enabled; + bool ptp_available; + bool ptp_datapath_up; struct atl_link_type *link; struct atl_fc_state fc; }; @@ -185,23 +192,87 @@ enum macsec_msg_type { macsec_get_stats_msg, }; -struct macsec_cfg_request { +struct __packed macsec_cfg_request { u32 enabled; u32 egress_threshold; u32 ingress_threshold; u32 interrupts_enabled; -} __attribute__((__packed__)); +}; -struct macsec_msg_fw_request { +struct __packed macsec_msg_fw_request { u32 msg_id; /* not used */ u32 msg_type; struct macsec_cfg_request cfg; -} __attribute__((__packed__)); +}; -struct macsec_msg_fw_response { +struct __packed macsec_msg_fw_response { u32 result; -} __attribute__((__packed__)); +}; + +enum atl_gpio_pin_function { + GPIO_PIN_FUNCTION_NC, + GPIO_PIN_FUNCTION_VAUX_ENABLE, + GPIO_PIN_FUNCTION_EFUSE_BURN_ENABLE, + GPIO_PIN_FUNCTION_SFP_PLUS_DETECT, + 
GPIO_PIN_FUNCTION_TX_DISABLE, + GPIO_PIN_FUNCTION_RATE_SEL_0, + GPIO_PIN_FUNCTION_RATE_SEL_1, + GPIO_PIN_FUNCTION_TX_FAULT, + GPIO_PIN_FUNCTION_PTP0, + GPIO_PIN_FUNCTION_PTP1, + GPIO_PIN_FUNCTION_PTP2, + GPIO_PIN_FUNCTION_SIZE +}; + +struct __packed atl_ptp_offset_info { + u16 ingress_100; + u16 egress_100; + u16 ingress_1000; + u16 egress_1000; + u16 ingress_2500; + u16 egress_2500; + u16 ingress_5000; + u16 egress_5000; + u16 ingress_10000; + u16 egress_10000; +}; + +enum ptp_msg_type { + ptp_gpio_ctrl_msg = 0x11, + ptp_adj_freq_msg = 0x12, + ptp_adj_clock_msg = 0x13, +}; + +struct __packed ptp_gpio_ctrl { + u32 index; + u32 period; + u64 start; +}; + +struct __packed ptp_adj_freq { + u32 ns_mac; + u32 fns_mac; + u32 ns_phy; + u32 fns_phy; + u32 mac_ns_adj; + u32 mac_fns_adj; +}; + +struct __packed ptp_adj_clock { + u32 ns; + u32 sec; + int sign; +}; + +struct __packed ptp_msg_fw_request { + u32 msg_id; + union { + struct ptp_gpio_ctrl gpio_ctrl; + struct ptp_adj_freq adj_freq; + struct ptp_adj_clock adj_clock; + }; +}; struct atl_fw_ops { void (*set_link)(struct atl_hw *hw, bool force); @@ -223,6 +294,8 @@ struct atl_fw_ops { int (*__get_hbeat)(struct atl_hw *hw, uint16_t *hbeat); int (*get_mac_addr)(struct atl_hw *hw, uint8_t *buf); int (*update_thermal)(struct atl_hw *hw); + int (*send_ptp_req)(struct atl_hw *hw, struct ptp_msg_fw_request *msg); + void (*set_ptp)(struct atl_hw *hw, bool on); int (*deinit)(struct atl_hw *hw); }; diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.c index 2b45d724f0c83fcf7166fa89e9382133bccd0c2b..2b5e899c6bf8609eb20c2379159179dfc808dd78 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.c +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.c @@ -362,7 +362,7 @@ void atl_fwd_release_ring(struct atl_fwd_ring *ring) hwring->size * sizeof(*hwring->descs), hwring->daddr, ops); else - atl_free_descs(nic, &ring->hw); + atl_free_descs(nic, &ring->hw, 0); kfree(ring); } EXPORT_SYMBOL(atl_fwd_release_ring); @@ -461,7 +461,7 @@ struct atl_fwd_ring *atl_fwd_request_ring(struct net_device *ndev, } else ret = PTR_ERR(descs); } else - ret = atl_alloc_descs(nic, hwring); + ret = atl_alloc_descs(nic, hwring, 0); if (ret) { atl_nic_err("%s: couldn't alloc the ring\n", __func__); @@ -494,7 +494,7 @@ struct atl_fwd_ring *atl_fwd_request_ring(struct net_device *ndev, hwring->size * sizeof(*hwring->descs), hwring->daddr, ops); else - atl_free_descs(nic, hwring); + atl_free_descs(nic, hwring, 0); free_ring: kfree(ring); diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwdnl.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwdnl.c index f12ddc85ebb51dc9ed5390168574cbf282e720e7..3e2a982963197c81d90066a9e7b77e3918e84762 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwdnl.c +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwdnl.c @@ -800,7 +800,7 @@ static bool atlfwd_nl_tx_head_poll_ring(struct atl_fwd_ring *ring) bump_ptr(sw_head, desc, 1); } while (budget--); - WRITE_ONCE(desc->head, sw_head); + desc->head = sw_head; pr_debug(ATL_FWDNL_PREFIX "bytes=%u, packets=%u, sw_head=%d\n", bytes, packets, sw_head); @@ -952,7 +952,7 @@ static int atlfwd_nl_transmit_skb_ring(struct atl_fwd_ring *ring, while (len > ATL_DATA_PER_TXD) { desc.len = cpu_to_le16(ATL_DATA_PER_TXD); txbuf->bytes = ATL_DATA_PER_TXD; - WRITE_ONCE(ring->hw.descs[desc_idx].tx, desc); + ring->hw.descs[desc_idx].tx = desc; bump_ptr(desc_idx, ring_desc, 1); txbuf = &ring_desc->txbufs[desc_idx]; 
memset(txbuf, 0, sizeof(*txbuf)); @@ -971,7 +971,7 @@ static int atlfwd_nl_transmit_skb_ring(struct atl_fwd_ring *ring, if (!frags) break; - WRITE_ONCE(ring->hw.descs[desc_idx].tx, desc); + ring->hw.descs[desc_idx].tx = desc; bump_ptr(desc_idx, ring_desc, 1); txbuf = &ring_desc->txbufs[desc_idx]; memset(txbuf, 0, sizeof(*txbuf)); @@ -992,7 +992,7 @@ static int atlfwd_nl_transmit_skb_ring(struct atl_fwd_ring *ring, */ txbuf->packets = 1; txbuf->skb = skb; - WRITE_ONCE(ring->hw.descs[desc_idx].tx, desc); + ring->hw.descs[desc_idx].tx = desc; bump_ptr(desc_idx, ring_desc, 1); ring_desc->tail = desc_idx; diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.c index 2761411995d7100d5e7e95319adfbaa4d14e7bd2..338d6f014c3d92fcca3618e06842cbd5578db797 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.c +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.c @@ -14,6 +14,7 @@ #include "atl_common.h" #include "atl_hw.h" +#include "atl_ptp.h" #include "atl_ring.h" #include "atl2_fw.h" @@ -83,14 +84,19 @@ static inline void atl_glb_soft_reset_full(struct atl_hw *hw) } static void atl2_hw_new_rx_filter_vlan_promisc(struct atl_hw *hw, bool promisc); -static void atl2_hw_new_rx_filter_promisc(struct atl_hw *hw, bool promisc); +static void atl2_hw_new_rx_filter_promisc(struct atl_hw *hw, bool promisc, + bool allmulti); static void atl2_hw_init_new_rx_filters(struct atl_hw *hw); -static void atl_set_promisc(struct atl_hw *hw, bool enabled) +static void atl_set_promisc(struct atl_hw *hw, bool enabled, bool allmulti) { atl_write_bit(hw, ATL_RX_FLT_CTRL1, 3, enabled); if (hw->new_rpf) - atl2_hw_new_rx_filter_promisc(hw, enabled); + atl2_hw_new_rx_filter_promisc(hw, enabled, allmulti); + else { + atl_write_bit(hw, ATL_RX_MC_FLT_MSK, 14, allmulti); + atl_write(hw, ATL_RX_MC_FLT(0), allmulti ? 0x80010FFF : 0x00010FFF); + } } void atl_set_vlan_promisc(struct atl_hw *hw, int promisc) @@ -105,7 +111,7 @@ static inline void atl_enable_dma_net_lpb_mode(struct atl_nic *nic) struct atl_hw *hw = &nic->hw; atl_set_vlan_promisc(hw, 1); - atl_set_promisc(hw, 1); + atl_set_promisc(hw, 1, 0); atl_write_bit(hw, ATL_TX_PBUF_CTRL1, 4, 0); atl_write_bit(hw, ATL_TX_CTRL1, 4, 1); atl_write_bit(hw, ATL_RX_CTRL1, 4, 1); @@ -490,6 +496,12 @@ void atl_refresh_link(struct atl_nic *nic) pm_runtime_put_sync(&nic->hw.pdev->dev); } } + if (nic->ptp) { + atl_ptp_clock_init(nic); + atl_ptp_tm_offset_set(nic, link ? 
link->speed : 0); + atl_ptp_link_change(nic); + } + atl_rx_xoff_set(hw, !!(hw->link_state.fc.cur & atl_fc_rx)); atl_intr_enable_non_ring(nic); @@ -652,6 +664,9 @@ int atl_set_rss_tbl(struct atl_hw *hw) return 0; } +static unsigned int atl_ptp_rx_buf_reserve = 16; +static unsigned int atl_ptp_tx_buf_reserve = 8; + unsigned int atl_fwd_rx_buf_reserve = #ifdef CONFIG_ATLFWD_FWD_RXBUF CONFIG_ATLFWD_FWD_RXBUF; @@ -692,16 +707,20 @@ void atl_start_hw_global(struct atl_nic *nic) tpb_size = 128; } /* Alloc TPB */ + /* TC2: space for PTP */ + tpb_size -= atl_ptp_tx_buf_reserve; + atl_write(hw, ATL_TX_PBUF_REG1(2), atl_ptp_tx_buf_reserve); /* TC1: space for offload engine iface */ + tpb_size -= atl_fwd_tx_buf_reserve; atl_write(hw, ATL_TX_PBUF_REG1(1), atl_fwd_tx_buf_reserve); atl_write(hw, ATL_TX_PBUF_REG2(1), (atl_fwd_tx_buf_reserve * 32 * 66 / 100) << 16 | (atl_fwd_tx_buf_reserve * 32 * 50 / 100)); /* TC0: 160k minus TC1 size */ - atl_write(hw, ATL_TX_PBUF_REG1(0), tpb_size - atl_fwd_tx_buf_reserve); + atl_write(hw, ATL_TX_PBUF_REG1(0), tpb_size); atl_write(hw, ATL_TX_PBUF_REG2(0), - ((tpb_size - atl_fwd_tx_buf_reserve) * 32 * 66 / 100) << 16 | - ((tpb_size - atl_fwd_tx_buf_reserve) * 32 * 50 / 100)); + (tpb_size * 32 * 66 / 100) << 16 | + (tpb_size * 32 * 50 / 100)); /* 4-TC | Enable TPB */ atl_set_bits(hw, ATL_TX_PBUF_CTRL1, BIT(8) | BIT(0)); /* TX Buffer clk gate off */ @@ -709,16 +728,22 @@ void atl_start_hw_global(struct atl_nic *nic) atl_clear_bits(hw, ATL_TX_PBUF_CTRL1, BIT(5)); /* Alloc RPB */ + /* TC2: space for PTP */ + rpb_size -= atl_ptp_rx_buf_reserve; + atl_write(hw, ATL_RX_PBUF_REG1(2), atl_ptp_rx_buf_reserve); + /* No flow control for PTP */ + atl_write_bit(hw, ATL_RX_PBUF_REG2(2), 31, 0); /* TC1: space for offload engine iface */ + rpb_size -= atl_fwd_rx_buf_reserve; atl_write(hw, ATL_RX_PBUF_REG1(1), atl_fwd_rx_buf_reserve); atl_write(hw, ATL_RX_PBUF_REG2(1), BIT(31) | (atl_fwd_rx_buf_reserve * 32 * 66 / 100) << 16 | (atl_fwd_rx_buf_reserve * 32 * 50 / 100)); /* TC1: 320k minus TC1 size */ - atl_write(hw, ATL_RX_PBUF_REG1(0), rpb_size - atl_fwd_rx_buf_reserve); + atl_write(hw, ATL_RX_PBUF_REG1(0), rpb_size); atl_write(hw, ATL_RX_PBUF_REG2(0), BIT(31) | - ((rpb_size - atl_fwd_rx_buf_reserve) * 32 * 66 / 100) << 16 | - ((rpb_size - atl_fwd_rx_buf_reserve) * 32 * 50 / 100)); + (rpb_size * 32 * 66 / 100) << 16 | + (rpb_size * 32 * 50 / 100)); /* 4-TC | Enable RPB */ atl_set_bits(hw, ATL_RX_PBUF_CTRL1, BIT(8) | BIT(4) | BIT(0)); @@ -744,9 +769,9 @@ void atl_start_hw_global(struct atl_nic *nic) /* RPF */ /* Default RPF2 parser options */ atl_write(hw, ATL_RX_FLT_CTRL2, 0x0); - atl_set_uc_flt(hw, 0, hw->mac_addr); + atl_set_uc_flt(hw, nic->rxf_mac.base_index, hw->mac_addr); /* BC action host */ - atl_write_bits(hw, ATL_RX_FLT_CTRL1, 12, 3, 1); + atl_write_bits(hw, ATL_RX_FLT_CTRL1, 12, 1, 1); /* Enable BC */ atl_write_bit(hw, ATL_RX_FLT_CTRL1, 0, 1); /* BC thresh */ @@ -832,22 +857,16 @@ void atl_start_hw_global(struct atl_nic *nic) #define atl_vlan_flt_val(vid) ((uint32_t)(vid) | 1 << 16 | 1 << 31) -static void atl_set_all_multi(struct atl_hw *hw, bool all_multi) -{ - atl_write_bit(hw, ATL_RX_MC_FLT_MSK, 14, all_multi); - atl_write(hw, ATL_RX_MC_FLT(0), all_multi ? 
0x80010FFF : 0x00010FFF); -} - void atl_set_rx_mode(struct net_device *ndev) { struct atl_nic *nic = netdev_priv(ndev); struct atl_hw *hw = &nic->hw; bool is_multicast_enabled = !!(ndev->flags & IFF_MULTICAST); - int all_multi_needed = !!(ndev->flags & IFF_ALLMULTI); - int promisc_needed = !!(ndev->flags & IFF_PROMISC); + bool all_multi_needed = !!(ndev->flags & IFF_ALLMULTI); + bool promisc_needed = !!(ndev->flags & IFF_PROMISC); + int i = nic->rxf_mac.base_index + 1; /* 1 reserved for MAC address */ int uc_count = netdev_uc_count(ndev); int mc_count = 0; - int i = 1; /* UC filter 0 reserved for MAC address */ struct netdev_hw_addr *hwaddr; if (!pm_runtime_active(&nic->hw.pdev->dev)) @@ -856,11 +875,10 @@ void atl_set_rx_mode(struct net_device *ndev) if (is_multicast_enabled) mc_count = netdev_mc_count(ndev); - if (uc_count > ATL_UC_FLT_NUM - 1) - promisc_needed |= 1; - else if (uc_count + mc_count > ATL_UC_FLT_NUM - 1) - all_multi_needed |= 1; - + if (uc_count > nic->rxf_mac.available - 1) + promisc_needed = true; + else if (uc_count + mc_count > nic->rxf_mac.available - 1) + all_multi_needed = true; /* Enable promisc VLAN mode if IFF_PROMISC explicitly * requested or too many VIDs registered @@ -869,15 +887,18 @@ void atl_set_rx_mode(struct net_device *ndev) ndev->flags & IFF_PROMISC || nic->rxf_vlan.promisc_count || !nic->rxf_vlan.vlans_active); - atl_set_promisc(hw, promisc_needed); + atl_set_promisc(hw, promisc_needed, + is_multicast_enabled && all_multi_needed); + if (hw->new_rpf) + atl2_fw_set_filter_policy(hw, promisc_needed, + is_multicast_enabled && all_multi_needed); + if (promisc_needed) return; netdev_for_each_uc_addr(hwaddr, ndev) atl_set_uc_flt(hw, i++, hwaddr->addr); - atl_set_all_multi(hw, is_multicast_enabled && all_multi_needed); - if (is_multicast_enabled && !all_multi_needed) netdev_for_each_mc_addr(hwaddr, ndev) atl_set_uc_flt(hw, i++, hwaddr->addr); @@ -886,11 +907,12 @@ void atl_set_rx_mode(struct net_device *ndev) atl_disable_uc_flt(hw, i++); } -int atl_alloc_descs(struct atl_nic *nic, struct atl_hw_ring *ring) +int atl_alloc_descs(struct atl_nic *nic, struct atl_hw_ring *ring, size_t extra) { struct device *dev = &nic->hw.pdev->dev; - ring->descs = dma_alloc_coherent(dev, ring->size * sizeof(*ring->descs), + ring->descs = dma_alloc_coherent(dev, + ring->size * sizeof(*ring->descs) + extra, &ring->daddr, GFP_KERNEL); if (!ring->descs) @@ -899,14 +921,14 @@ int atl_alloc_descs(struct atl_nic *nic, struct atl_hw_ring *ring) return 0; } -void atl_free_descs(struct atl_nic *nic, struct atl_hw_ring *ring) +void atl_free_descs(struct atl_nic *nic, struct atl_hw_ring *ring, size_t extra) { struct device *dev = &nic->hw.pdev->dev; if (!ring->descs) return; - dma_free_coherent(dev, ring->size * sizeof(*ring->descs), + dma_free_coherent(dev, ring->size * sizeof(*ring->descs) + extra, ring->descs, ring->daddr); ring->descs = 0; } @@ -1205,7 +1227,7 @@ void atl_adjust_eth_stats(struct atl_ether_stats *stats, int atl_update_eth_stats(struct atl_nic *nic) { struct atl_hw *hw = &nic->hw; - struct atl_ether_stats stats = {0}; + struct atl_ether_stats stats; uint32_t reg = 0, reg2 = 0; int ret; @@ -1213,6 +1235,8 @@ int atl_update_eth_stats(struct atl_nic *nic) test_bit(ATL_ST_RESETTING, &nic->hw.state)) return 0; + memset(&stats, 0, sizeof(stats)); + atl_lock_fw(hw); ret = atl_hwsem_get(hw, ATL_MCP_SEM_MSM); @@ -1437,105 +1461,73 @@ int atl2_act_rslvr_table_set(struct atl_hw *hw, u8 location, /** Initialise new rx filters * L2 promisc OFF * VLAN promisc OFF - * - * VLAN - * MAC - 
* ALLMULTI - * UT - * VLAN promisc ON - * L2 promisc ON + * user custom filtlers + * RSS TC0 */ static void atl2_hw_init_new_rx_filters(struct atl_hw *hw) { - atl_write(hw, ATL2_RPF_REC_TAB_EN, 0xFFFF); - atl_write_bits(hw, ATL_RX_UC_FLT_REG2(0), 22, 6, ATL2_RPF_TAG_BASE_UC); - atl_write_bits(hw, ATL2_RX_FLT_L2_BC_TAG, 0, 6, ATL2_RPF_TAG_BASE_UC); + struct atl_nic *nic = container_of(hw, struct atl_nic, hw); + uint32_t art_last_sec, art_first_sec, art_mask; + int index; + + atl_write_bits(hw, ATL_RX_UC_FLT_REG2(nic->rxf_mac.base_index), + 22, 6, ATL2_RPF_TAG_BASE_UC); + atl_write_bits(hw, ATL2_RX_FLT_L2_BC_TAG, 0, 6, ATL2_RPF_TAG_BASE_BC); atl_set_bits(hw, ATL2_RPF_L3_FLT(0), BIT(0x17)); + atl_write_bit(hw, ATL_RX_MC_FLT_MSK, 14, 1); + + art_last_sec = hw->art_base_index / 8 + hw->art_available / 8; + art_first_sec = hw->art_base_index / 8; + art_mask = (BIT(art_last_sec) - 1) - (BIT(art_first_sec) - 1); + atl_set_bits(hw, ATL2_RPF_REC_TAB_EN, art_mask); + index = hw->art_base_index + ATL2_RPF_L2_PROMISC_OFF_INDEX; atl2_act_rslvr_table_set(hw, - ATL2_RPF_L2_PROMISC_OFF_INDEX, + index, 0, ATL2_RPF_TAG_UC_MASK | ATL2_RPF_TAG_ALLMC_MASK, ATL2_ACTION_DROP); + index = hw->art_base_index + ATL2_RPF_VLAN_PROMISC_OFF_INDEX; atl2_act_rslvr_table_set(hw, - ATL2_RPF_VLAN_PROMISC_OFF_INDEX, + index, 0, ATL2_RPF_TAG_VLAN_MASK | ATL2_RPF_TAG_UNTAG_MASK, ATL2_ACTION_DROP); - - atl2_act_rslvr_table_set(hw, - ATL2_RPF_VLAN_INDEX, - ATL2_RPF_TAG_BASE_VLAN, - ATL2_RPF_TAG_VLAN_MASK, - ATL2_ACTION_ASSIGN_TC(0)); - + index = hw->art_base_index + ATL2_RPF_DEFAULT_RULE_INDEX; atl2_act_rslvr_table_set(hw, - ATL2_RPF_MAC_INDEX, - ATL2_RPF_TAG_BASE_UC, - ATL2_RPF_TAG_UC_MASK, - ATL2_ACTION_ASSIGN_TC(0)); - - atl2_act_rslvr_table_set(hw, - ATL2_RPF_ALLMC_INDEX, - ATL2_RPF_TAG_BASE_ALLMC, - ATL2_RPF_TAG_ALLMC_MASK, - ATL2_ACTION_ASSIGN_TC(0)); - - atl2_act_rslvr_table_set(hw, - ATL2_RPF_UNTAG_INDEX, - ATL2_RPF_TAG_UNTAG_MASK, - ATL2_RPF_TAG_UNTAG_MASK, - ATL2_ACTION_ASSIGN_TC(0)); - - atl2_act_rslvr_table_set(hw, - ATL2_RPF_VLAN_PROMISC_ON_INDEX, + index, 0, - ATL2_RPF_TAG_VLAN_MASK, - ATL2_ACTION_DISABLE); - - atl2_act_rslvr_table_set(hw, - ATL2_RPF_L2_PROMISC_ON_INDEX, 0, - ATL2_RPF_TAG_UC_MASK, - ATL2_ACTION_DISABLE); + ATL2_ACTION_ASSIGN_TC(0)); } - static void atl2_hw_new_rx_filter_vlan_promisc(struct atl_hw *hw, bool promisc) { - u16 on_action = promisc ? ATL2_ACTION_ASSIGN_TC(0) : ATL2_ACTION_DISABLE; u16 off_action = !promisc ? ATL2_ACTION_DROP : ATL2_ACTION_DISABLE; + int index = hw->art_base_index + ATL2_RPF_VLAN_PROMISC_OFF_INDEX; atl2_act_rslvr_table_set(hw, - ATL2_RPF_VLAN_PROMISC_ON_INDEX, - 0, - ATL2_RPF_TAG_VLAN_MASK, - on_action); - - atl2_act_rslvr_table_set(hw, - ATL2_RPF_VLAN_PROMISC_OFF_INDEX, + index, 0, ATL2_RPF_TAG_VLAN_MASK | ATL2_RPF_TAG_UNTAG_MASK, off_action); } -static void atl2_hw_new_rx_filter_promisc(struct atl_hw *hw, bool promisc) +static void atl2_hw_new_rx_filter_promisc(struct atl_hw *hw, bool promisc, + bool allmulti) { - u16 on_action = promisc ? ATL2_ACTION_ASSIGN_TC(0) : ATL2_ACTION_DISABLE; u16 off_action = promisc ? ATL2_ACTION_DISABLE : ATL2_ACTION_DROP; + u32 mask = allmulti ? 
(ATL2_RPF_TAG_UC_MASK | ATL2_RPF_TAG_ALLMC_MASK) : + ATL2_RPF_TAG_UC_MASK; + int index = hw->art_base_index + ATL2_RPF_L2_PROMISC_OFF_INDEX; atl2_act_rslvr_table_set(hw, - ATL2_RPF_L2_PROMISC_OFF_INDEX, + index, 0, - ATL2_RPF_TAG_UC_MASK | ATL2_RPF_TAG_ALLMC_MASK, + mask, off_action); - atl2_act_rslvr_table_set(hw, - ATL2_RPF_L2_PROMISC_ON_INDEX, - 0, - ATL2_RPF_TAG_UC_MASK, - on_action); } diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.h index ed5f672a13bf1c7079e2df71173f12d616cade10..57d5feba725f4d205a1d214ec32d03af746b2dde 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.h @@ -41,18 +41,15 @@ struct atl_nic; #define ATL2_ACTION_ASSIGN_QUEUE(QUEUE) ATL2_ACTION(1, 0, (QUEUE), 1, 0) #define ATL2_ACTION_ASSIGN_TC(TC) ATL2_ACTION(1, 1, (TC), 1, 0) -#define ATL2_RPF_L2_PROMISC_OFF_INDEX 0 -#define ATL2_RPF_VLAN_PROMISC_OFF_INDEX 1 -#define ATL2_RPF_L3L4_USER_INDEX 48 -#define ATL2_RPF_ET_PCP_USER_INDEX 64 -#define ATL2_RPF_VLAN_USER_INDEX 80 -#define ATL2_RPF_FLEX_USER_INDEX 96 -#define ATL2_RPF_VLAN_INDEX 122 -#define ATL2_RPF_MAC_INDEX 123 -#define ATL2_RPF_ALLMC_INDEX 124 -#define ATL2_RPF_UNTAG_INDEX 125 -#define ATL2_RPF_VLAN_PROMISC_ON_INDEX 126 -#define ATL2_RPF_L2_PROMISC_ON_INDEX 127 +enum { + ATL2_RPF_L2_PROMISC_OFF_INDEX = 0, + ATL2_RPF_VLAN_PROMISC_OFF_INDEX, + ATL2_RPF_L3L4_USER_INDEX, + ATL2_RPF_ET_PCP_USER_INDEX = ATL2_RPF_L3L4_USER_INDEX + 16, + ATL2_RPF_VLAN_USER_INDEX = ATL2_RPF_ET_PCP_USER_INDEX + 16, + ATL2_RPF_FLEX_USER_INDEX = ATL2_RPF_VLAN_USER_INDEX + 16, + ATL2_RPF_DEFAULT_RULE_INDEX = ATL2_RPF_FLEX_USER_INDEX + 1, +}; #define ATL2_RPF_TAG_UC_OFFSET 0x0 #define ATL2_RPF_TAG_ALLMC_OFFSET 0x6 @@ -77,10 +74,8 @@ struct atl_nic; #define ATL2_RPF_TAG_FLEX_MASK (0x00000003 << ATL2_RPF_TAG_FLEX_OFFSET) #define ATL2_RPF_TAG_PCP_MASK (0x00000007 << ATL2_RPF_TAG_PCP_OFFSET) -#define ATL2_RPF_TAG_BASE_UC (1 << ATL2_RPF_TAG_UC_OFFSET) -#define ATL2_RPF_TAG_BASE_ALLMC (1 << ATL2_RPF_TAG_ALLMC_OFFSET) -#define ATL2_RPF_TAG_BASE_UNTAG (1 << ATL2_RPF_TAG_UNTAG_OFFSET) -#define ATL2_RPF_TAG_BASE_VLAN (1 << ATL2_RPF_TAG_VLAN_OFFSET) +#define ATL2_RPF_TAG_BASE_BC (1 << ATL2_RPF_TAG_UC_OFFSET) +#define ATL2_RPF_TAG_BASE_UC (2 << ATL2_RPF_TAG_UC_OFFSET) #define ATL2_FW_HOSTLOAD_REQ_LEN_MAX 0x1000 @@ -129,6 +124,7 @@ enum atl_nic_state { #define ATL_WAKE_SUPPORTED (WAKE_MAGIC | WAKE_PHY) struct atl_hw { + atomic_t flags; uint8_t __iomem *regs; struct pci_dev *pdev; unsigned long state; @@ -148,6 +144,9 @@ struct atl_hw { #if IS_ENABLED(CONFIG_MACSEC) && defined(NETIF_F_HW_MACSEC) struct atl_macsec_cfg macsec_cfg; #endif + s64 ptp_clk_offset; + int art_base_index; + int art_available; }; struct atl_hw_ring { @@ -326,8 +325,8 @@ void atl_set_rss_key(struct atl_hw *hw); int atl_set_rss_tbl(struct atl_hw *hw); void atl_set_uc_flt(struct atl_hw *hw, int idx, uint8_t mac_addr[ETH_ALEN]); -int atl_alloc_descs(struct atl_nic *nic, struct atl_hw_ring *ring); -void atl_free_descs(struct atl_nic *nic, struct atl_hw_ring *ring); +int atl_alloc_descs(struct atl_nic *nic, struct atl_hw_ring *ring, size_t extra); +void atl_free_descs(struct atl_nic *nic, struct atl_hw_ring *ring, size_t extra); void atl_set_intr_bits(struct atl_hw *hw, int idx, int rxbit, int txbit); int atl_alloc_link_intr(struct atl_nic *nic); void atl_free_link_intr(struct atl_nic *nic); diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw_ptp.c 
b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw_ptp.c new file mode 100644 index 0000000000000000000000000000000000000000..fd2f0bd549b74ad531a3ef0ac01ea0308c219d0b --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw_ptp.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Atlantic Network Driver + * + * Copyright (C) 2017 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +#include "atl_hw_ptp.h" +#include "atl_desc.h" +#include "atl_mdio.h" + +#define FRAC_PER_NS 0x100000000LL + +#define ATL_HW_MAC_COUNTER_HZ 312500000ll +#define ATL_HW_PHY_COUNTER_HZ 160000000ll + +/* register address for bitfield PTP Digital Clock Read Enable */ +#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_ADR 0x00004628 +/* lower bit position of bitfield PTP Digital Clock Read Enable */ +#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_SHIFT 4 +/* width of bitfield PTP Digital Clock Read Enable */ +#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_WIDTH 1 + +/* register address for ptp counter reading */ +#define HW_ATL_PCS_PTP_TS_VAL_ADDR(index) (0x00004900 + (index) * 0x4) + +static void hw_atl_pcs_ptp_clock_read_enable(struct atl_hw *hw, + u32 ptp_clock_read_enable) +{ + atl_write_bits(hw, HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_ADR, + HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_SHIFT, + HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_WIDTH, + ptp_clock_read_enable); +} + +static u32 hw_atl_pcs_ptp_clock_get(struct atl_hw *hw, u32 index) +{ + return atl_read(hw, HW_ATL_PCS_PTP_TS_VAL_ADDR(index)); +} + +#define get_ptp_ts_val_u64(self, indx) \ + ((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff)) + +void hw_atl_get_ptp_ts(struct atl_hw *hw, u64 *stamp) +{ + u64 ns; + + hw_atl_pcs_ptp_clock_read_enable(hw, 1); + hw_atl_pcs_ptp_clock_read_enable(hw, 0); + ns = (get_ptp_ts_val_u64(hw, 0) + + (get_ptp_ts_val_u64(hw, 1) << 16)) * NSEC_PER_SEC + + (get_ptp_ts_val_u64(hw, 3) + + (get_ptp_ts_val_u64(hw, 4) << 16)); + + *stamp = ns + hw->ptp_clk_offset; +} + +static void hw_atl_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns) +{ + /* For accuracy, the digit is extended */ + s64 base_ns = ((adj + NSEC_PER_SEC) * NSEC_PER_SEC); + u64 nsi_frac = 0; + u64 nsi; + + base_ns = div64_s64(base_ns, freq); + nsi = div64_u64(base_ns, NSEC_PER_SEC); + + if (base_ns != nsi * NSEC_PER_SEC) { + s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC, + base_ns - nsi * NSEC_PER_SEC); + nsi_frac = div64_s64(FRAC_PER_NS * NSEC_PER_SEC, divisor); + } + + *ns = (u32)nsi; + *fns = (u32)nsi_frac; +} + +static void +hw_atl_mac_adj_param_calc(struct ptp_adj_freq *ptp_adj_freq, u64 phyfreq, + u64 macfreq) +{ + s64 adj_fns_val; + s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy + + FRAC_PER_NS * ptp_adj_freq->ns_phy); + s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac + + FRAC_PER_NS * ptp_adj_freq->ns_mac); + s64 fault_in_sec_phy = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy; + s64 fault_in_sec_mac = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac; + /* MAC MCP counter freq is macfreq / 4 */ + s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) * + 4 * FRAC_PER_NS; + + diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow, + ATL_HW_MAC_COUNTER_HZ); + adj_fns_val = (ptp_adj_freq->fns_mac + FRAC_PER_NS * + ptp_adj_freq->ns_mac) + diff_in_mcp_overflow; + + ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, 
FRAC_PER_NS); + ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj * + FRAC_PER_NS; +} + +int hw_atl_adj_sys_clock(struct atl_hw *hw, s64 delta) +{ + hw->ptp_clk_offset += delta; + + atl_write(hw, ATL_RX_SPARE_CTRL0, lower_32_bits(hw->ptp_clk_offset)); + atl_write(hw, ATL_RX_SPARE_CTRL1, upper_32_bits(hw->ptp_clk_offset)); + + return 0; +} + +int hw_atl_ts_to_sys_clock(struct atl_hw *hw, u64 ts, u64 *time) +{ + *time = hw->ptp_clk_offset + ts; + return 0; +} + +int hw_atl_adj_clock_freq(struct atl_hw *hw, s32 ppb) +{ + struct ptp_msg_fw_request fwreq; + struct atl_mcp *mcp = &hw->mcp; + + memset(&fwreq, 0, sizeof(fwreq)); + + fwreq.msg_id = ptp_adj_freq_msg; + hw_atl_adj_params_get(ATL_HW_MAC_COUNTER_HZ, ppb, + &fwreq.adj_freq.ns_mac, + &fwreq.adj_freq.fns_mac); + hw_atl_adj_params_get(ATL_HW_PHY_COUNTER_HZ, ppb, + &fwreq.adj_freq.ns_phy, + &fwreq.adj_freq.fns_phy); + hw_atl_mac_adj_param_calc(&fwreq.adj_freq, + ATL_HW_PHY_COUNTER_HZ, + ATL_HW_MAC_COUNTER_HZ); + + return mcp->ops->send_ptp_req(hw, &fwreq); +} + +int hw_atl_gpio_pulse(struct atl_hw *hw, u32 index, u64 start, u32 period) +{ + struct ptp_msg_fw_request fwreq; + struct atl_mcp *mcp = &hw->mcp; + + memset(&fwreq, 0, sizeof(fwreq)); + + fwreq.msg_id = ptp_gpio_ctrl_msg; + fwreq.gpio_ctrl.index = index; + fwreq.gpio_ctrl.period = period; + /* Apply time offset */ + fwreq.gpio_ctrl.start = start; + + return mcp->ops->send_ptp_req(hw, &fwreq); +} + +int hw_atl_extts_gpio_enable(struct atl_hw *hw, u32 index, u32 enable) +{ + /* Enable/disable Sync1588 GPIO Timestamping */ + return atl_mdio_write(hw, 0, MDIO_MMD_PCS, 0xc611, enable ? 0x71 : 0); +} + +int hw_atl_get_sync_ts(struct atl_hw *hw, u64 *ts) +{ + u16 nsec_l = 0; + u16 nsec_h = 0; + u16 sec_l = 0; + u16 sec_h = 0; + int ret; + + if (!ts) + return -1; + + /* PTP external GPIO clock seconds count 15:0 */ + ret = atl_mdio_read(hw, 0, MDIO_MMD_PCS, 0xc914, &sec_l); + /* PTP external GPIO clock seconds count 31:16 */ + if (!ret) + ret = atl_mdio_read(hw, 0, MDIO_MMD_PCS, 0xc915, &sec_h); + /* PTP external GPIO clock nanoseconds count 15:0 */ + if (!ret) + ret = atl_mdio_read(hw, 0, MDIO_MMD_PCS, 0xc916, &nsec_l); + /* PTP external GPIO clock nanoseconds count 31:16 */ + if (!ret) + ret = atl_mdio_read(hw, 0, MDIO_MMD_PCS, 0xc917, &nsec_h); + + *ts = ((u64)nsec_h << 16) + nsec_l + (((u64)sec_h << 16) + sec_l) * NSEC_PER_SEC; + + return ret; +} + +u16 hw_atl_rx_extract_ts(struct atl_hw *hw, u8 *p, unsigned int len, + u64 *timestamp) +{ + unsigned int offset = 14; + struct ethhdr *eth; + u64 sec; + u8 *ptr; + u32 ns; + + if (len <= offset || !timestamp) + return 0; + + /* The TIMESTAMP at the end of the packet has the following format: + * (big-endian) + * struct { + * uint64_t sec; + * uint32_t ns; + * uint16_t stream_id; + * }; + */ + ptr = p + (len - offset); + memcpy(&sec, ptr, sizeof(sec)); + ptr += sizeof(sec); + memcpy(&ns, ptr, sizeof(ns)); + + sec = be64_to_cpu(sec) & 0xffffffffffffllu; + ns = be32_to_cpu(ns); + *timestamp = sec * NSEC_PER_SEC + ns + hw->ptp_clk_offset; + + eth = (struct ethhdr *)p; + + return (eth->h_proto == htons(ETH_P_1588)) ? 
12 : 14; +} + +void hw_atl_extract_hwts(struct atl_hw *hw, struct atl_rx_desc_hwts_wb *hwts_wb, + u64 *timestamp) +{ + u64 tmp, sec, ns; + + sec = 0; + tmp = hwts_wb->sec_lw0 & 0x3ff; + sec += tmp; + tmp = (u64)((hwts_wb->sec_lw1 >> 16) & 0xffff) << 10; + sec += tmp; + tmp = (u64)(hwts_wb->sec_hw & 0xfff) << 26; + sec += tmp; + tmp = (u64)((hwts_wb->sec_hw >> 22) & 0x3ff) << 38; + sec += tmp; + ns = sec * NSEC_PER_SEC + hwts_wb->ns; + if (timestamp) + *timestamp = ns + hw->ptp_clk_offset; +} diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw_ptp.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw_ptp.h new file mode 100644 index 0000000000000000000000000000000000000000..04e31370bc13d116118a12b43ef0fbf010539b08 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw_ptp.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Atlantic Network Driver + * + * Copyright (C) 2017 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _ATL_HW_PTP_H_ +#define _ATL_HW_PTP_H_ + +#include "atl_hw.h" + +struct atl_rx_desc_hwts_wb; + +void hw_atl_get_ptp_ts(struct atl_hw *hw, u64 *stamp); +int hw_atl_adj_sys_clock(struct atl_hw *hw, s64 delta); +int hw_atl_ts_to_sys_clock(struct atl_hw *hw, u64 ts, u64 *time); +int hw_atl_adj_clock_freq(struct atl_hw *hw, s32 ppb); +int hw_atl_gpio_pulse(struct atl_hw *hw, u32 index, u64 start, u32 period); +int hw_atl_extts_gpio_enable(struct atl_hw *hw, u32 index, u32 enable); +int hw_atl_get_sync_ts(struct atl_hw *hw, u64 *ts); +u16 hw_atl_rx_extract_ts(struct atl_hw *hw, u8 *p, unsigned int len, + u64 *timestamp); +void hw_atl_extract_hwts(struct atl_hw *hw, struct atl_rx_desc_hwts_wb *hwts_wb, + u64 *timestamp); + +#endif diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_macsec.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_macsec.c index b2b3cfb4bfc6c92217722cbc97ebdf3c602593b6..448b868ff1e2f7668feee8a483608fafd7438616 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_macsec.c +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_macsec.c @@ -47,8 +47,9 @@ static int atl_apply_secy_cfg(struct atl_hw *hw, static void atl_ether_addr_to_mac(u32 mac[2], unsigned char *emac) { - u32 tmp[2] = { 0 }; + u32 tmp[2]; + memset(&tmp, 0, sizeof(tmp)); memcpy(((u8 *)tmp) + 2, emac, ETH_ALEN); mac[0] = swab32(tmp[1]); @@ -106,7 +107,7 @@ static int atl_get_txsc_idx_from_sc_idx(const enum atl_macsec_sc_sa sc_sa, /* Rotate keys u32[8] */ static void atl_rotate_keys(u32 (*key)[8], int key_len) { - u32 tmp[8] = { 0 }; + u32 tmp[8]; memcpy(&tmp, key, sizeof(tmp)); memset(*key, 0, sizeof(*key)); diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_main.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_main.c index b68d83d2a6f3968a17147181791bf2cf44a2d727..f7b27b9dfd985c86174ea12ae49688a9a1cc25de 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_main.c +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_main.c @@ -16,6 +16,7 @@ #include #include #include "atl_macsec.h" +#include "atl_ptp.h" #include "atl_qcom.h" @@ -188,6 +189,90 @@ static int atl_change_mtu(struct net_device *ndev, int mtu) #endif +static int atl_hwtstamp_config(struct atl_nic *nic, struct hwtstamp_config *config) +{ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: 
+ case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_NONE: + break; + default: + return -ERANGE; + } + + return atl_ptp_hwtstamp_config_set(nic, config); +} + +static int atl_hwtstamp_set(struct atl_nic *nic, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (!nic->ptp) + return -EOPNOTSUPP; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = atl_hwtstamp_config(nic, &config); + if (err) + return err; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static int atl_hwtstamp_get(struct atl_nic *nic, struct ifreq *ifr) +{ + struct hwtstamp_config config; + + if (!nic->ptp) + return -EOPNOTSUPP; + + atl_ptp_hwtstamp_config_get(nic, &config); + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static int atl_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + struct atl_nic *nic = netdev_priv(ndev); + int ret = -EOPNOTSUPP; + + pm_runtime_get_sync(&nic->hw.pdev->dev); + + switch (cmd) { + case SIOCSHWTSTAMP: + ret = atl_hwtstamp_set(nic, ifr); + break; + + case SIOCGHWTSTAMP: + ret = atl_hwtstamp_get(nic, ifr); + break; + } + + /* Release the runtime PM reference taken above on all paths */ + pm_runtime_put(&nic->hw.pdev->dev); + + return ret; +} + static int atl_set_mac_address(struct net_device *ndev, void *priv) { struct atl_nic *nic = netdev_priv(ndev); @@ -201,7 +286,7 @@ static int atl_set_mac_address(struct net_device *ndev, void *priv) ether_addr_copy(ndev->dev_addr, addr->sa_data); if (netif_running(ndev) && pm_runtime_active(&nic->hw.pdev->dev)) - atl_set_uc_flt(hw, 0, hw->mac_addr); + atl_set_uc_flt(hw, nic->rxf_mac.base_index, hw->mac_addr); return 0; } @@ -220,6 +305,7 @@ static const struct net_device_ops atl_ndev_ops = { .ndo_change_mtu = atl_change_mtu, #endif .ndo_set_features = atl_set_features, + .ndo_do_ioctl = atl_ndo_ioctl, .ndo_set_mac_address = atl_set_mac_address, #ifdef ATL_COMPAT_CAST_NDO_GET_STATS64 .ndo_get_stats64 = (void *)atl_get_stats64, @@ -374,6 +460,8 @@ static void atl_work(struct work_struct *work) struct atl_hw *hw = &nic->hw; int ret; + atl_ptp_work(nic); + clear_bit(ATL_ST_WORK_SCHED, &hw->state); atl_fw_watchdog(hw); @@ -421,6 +509,8 @@ static const struct pci_device_id atl_pci_tbl[] = { { PCI_VDEVICE(AQUANTIA, 0x14c0), ATL_ANTIGUA}, { PCI_VDEVICE(AQUANTIA, 0x93c0), ATL_ANTIGUA}, { PCI_VDEVICE(AQUANTIA, 0x94c0), ATL_ANTIGUA}, + { PCI_VDEVICE(AQUANTIA, 0x34c0), ATL_ANTIGUA}, + { PCI_VDEVICE(AQUANTIA, 0x11c0), ATL_ANTIGUA}, {} }; @@ -456,7 +546,7 @@ static int atl_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct net_device *ndev; struct atl_nic *nic = NULL; struct atl_hw *hw; - int disable_needed; + int disable_needed = 0; pm_runtime_set_active(&pdev->dev); pm_runtime_forbid(&pdev->dev); @@ -532,12 +622,20 @@ static int atl_probe(struct pci_dev *pdev, const struct pci_device_id *id) ether_addr_copy(ndev->dev_addr, hw->mac_addr); atl_dev_dbg("got MAC address: %pM\n", hw->mac_addr); + ret = atl_ptp_init(nic); + if (ret) + goto err_ptp_init; + + nic->requested_nvecs = atl_max_queues; nic->requested_tx_size = 
(atl_tx_ring_size & ~7); nic->requested_rx_size = (atl_rx_ring_size & ~7); nic->rx_intr_delay = atl_rx_mod; nic->tx_intr_delay = atl_tx_mod; + ret = atl_ptp_ring_alloc(nic); + if (ret) + goto err_ptp_alloc; + ret = atl_setup_datapath(nic); if (ret) goto err_datapath; @@ -583,6 +681,10 @@ static int atl_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_register; + ret = atl_ptp_register(nic); + if (ret) + goto err_ptp_register; + pci_set_drvdata(pdev, nic); netif_carrier_off(ndev); @@ -603,7 +705,6 @@ static int atl_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (hw->mcp.caps_low & atl_fw2_wake_on_link_force) pm_runtime_put_noidle(&pdev->dev); - atl_intr_enable_non_ring(nic); mod_timer(&nic->work_timer, jiffies + HZ); @@ -615,10 +716,15 @@ static int atl_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_hwmon_init: atl_stop(nic, true); +err_ptp_register: unregister_netdev(nic->ndev); err_register: atl_clear_datapath(nic); err_datapath: + atl_ptp_ring_free(nic); +err_ptp_alloc: + atl_ptp_free(nic); +err_ptp_init: err_hwinit: iounmap(hw->regs); err_ioremap: @@ -653,6 +759,7 @@ static void atl_remove(struct pci_dev *pdev) del_timer_sync(&nic->work_timer); cancel_work_sync(&nic->work); atl_intr_disable_all(&nic->hw); + atl_ptp_unregister(nic); unregister_netdev(nic->ndev); #if IS_ENABLED(CONFIG_ATLFWD_FWD) @@ -660,6 +767,10 @@ static void atl_remove(struct pci_dev *pdev) #endif atl_clear_datapath(nic); + + atl_ptp_ring_free(nic); + atl_ptp_free(nic); + iounmap(nic->hw.regs); free_netdev(nic->ndev); pci_release_regions(pdev); @@ -823,8 +934,12 @@ static int atl_pm_runtime_idle(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct atl_nic *nic = pci_get_drvdata(pdev); + /* pm_runtime_idle may be called during probe */ + if (!nic || nic->hw.pdev != pdev) + return -EBUSY; + if (!netif_carrier_ok(nic->ndev)) { - pm_schedule_suspend(&nic->hw.pdev->dev, atl_sleep_delay); + pm_schedule_suspend(dev, atl_sleep_delay); } return -EBUSY; diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ptp.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ptp.c new file mode 100644 index 0000000000000000000000000000000000000000..6cf7feaf46757d7808a693d9418e83a5c571b29b --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ptp.c @@ -0,0 +1,1310 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include "atl_ptp.h" + +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) +#include +#include +#include +#endif + +#include "atl_ring.h" +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) +#include "atl_common.h" +#include "atl_ethtool.h" +#include "atl_hw_ptp.h" +#include "atl_ring_desc.h" + +#define ATL_PTP_TX_TIMEOUT (HZ * 10) + +#define POLL_SYNC_TIMER_MS 15 + +#define MAX_PTP_GPIO_COUNT 4 + +#define PTP_8TC_RING_IDX 8 +#define PTP_4TC_RING_IDX 16 +#define PTP_HWTS_RING_IDX 31 + +enum ptp_speed_offsets { + ptp_offset_idx_10 = 0, + ptp_offset_idx_100, + ptp_offset_idx_1000, + ptp_offset_idx_2500, + ptp_offset_idx_5000, + ptp_offset_idx_10000, +}; + +struct ptp_skb_ring { + struct sk_buff **buff; + spinlock_t lock; + unsigned int size; + unsigned int head; + unsigned int tail; +}; + +struct ptp_tx_timeout { + spinlock_t lock; + bool active; + unsigned long tx_start; +}; + +enum atl_ptp_queue { + ATL_PTPQ_PTP = 0, + ATL_PTPQ_HWTS = 1, + ATL_PTPQ_NUM, +}; + +struct atl_ptp { + struct atl_nic *nic; + struct hwtstamp_config hwtstamp_config; + spinlock_t ptp_lock; + spinlock_t ptp_ring_lock; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_info; + + atomic_t offset_egress; + atomic_t offset_ingress; + + struct ptp_tx_timeout ptp_tx_timeout; + + struct napi_struct *napi; + unsigned int idx_vector; + + struct atl_queue_vec qvec[ATL_PTPQ_NUM]; + + struct ptp_skb_ring skb_ring; + + s8 udp_filter_idx; + s8 eth_type_filter_idx; + + struct delayed_work poll_sync; + u32 poll_timeout_ms; + + bool extts_pin_enabled; + u64 last_sync1588_ts; +}; + +#define atl_for_each_ptp_qvec(ptp, qvec) \ + for (qvec = &ptp->qvec[0]; \ + qvec < &ptp->qvec[ATL_PTPQ_NUM]; qvec++) + +struct ptp_tm_offset { + unsigned int mbps; + int egress; + int ingress; +}; + +static struct ptp_tm_offset ptp_offset[6]; + +static int __atl_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb) +{ + unsigned int next_head = (ring->head + 1) % ring->size; + + if (next_head == ring->tail) + return -ENOMEM; + + ring->buff[ring->head] = skb_get(skb); + ring->head = next_head; + + return 0; +} + +static int atl_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&ring->lock, flags); + ret = __atl_ptp_skb_put(ring, skb); + spin_unlock_irqrestore(&ring->lock, flags); + + return ret; +} + +static struct sk_buff *__atl_ptp_skb_get(struct ptp_skb_ring *ring) +{ + struct sk_buff *skb; + + if (ring->tail == ring->head) + return NULL; + + skb = ring->buff[ring->tail]; + ring->tail = (ring->tail + 1) % ring->size; + + return skb; +} + +static struct sk_buff *atl_ptp_skb_get(struct ptp_skb_ring *ring) +{ + unsigned long flags; + struct sk_buff *skb; + + spin_lock_irqsave(&ring->lock, flags); + skb = __atl_ptp_skb_get(ring); + spin_unlock_irqrestore(&ring->lock, flags); + + return skb; +} + +static unsigned int atl_ptp_skb_buf_len(struct ptp_skb_ring *ring) +{ + unsigned long flags; + unsigned int len; + + spin_lock_irqsave(&ring->lock, flags); + len = (ring->head >= ring->tail) ? 
+ ring->head - ring->tail : + ring->size - ring->tail + ring->head; + spin_unlock_irqrestore(&ring->lock, flags); + + return len; +} + +static int atl_ptp_skb_ring_init(struct ptp_skb_ring *ring, unsigned int size) +{ + struct sk_buff **buff = kcalloc(size, sizeof(*buff), GFP_KERNEL); + + if (!buff) + return -ENOMEM; + + spin_lock_init(&ring->lock); + + ring->buff = buff; + ring->size = size; + ring->head = 0; + ring->tail = 0; + + return 0; +} + +static void atl_ptp_skb_ring_clean(struct ptp_skb_ring *ring) +{ + struct sk_buff *skb; + + while ((skb = atl_ptp_skb_get(ring)) != NULL) + dev_kfree_skb_any(skb); +} + +static void atl_ptp_skb_ring_release(struct ptp_skb_ring *ring) +{ + if (ring->buff) { + atl_ptp_skb_ring_clean(ring); + kfree(ring->buff); + ring->buff = NULL; + } +} + +static void atl_ptp_tx_timeout_init(struct ptp_tx_timeout *timeout) +{ + spin_lock_init(&timeout->lock); + timeout->active = false; +} + +static void atl_ptp_tx_timeout_start(struct atl_ptp *ptp) +{ + struct ptp_tx_timeout *timeout = &ptp->ptp_tx_timeout; + unsigned long flags; + + spin_lock_irqsave(&timeout->lock, flags); + timeout->active = true; + timeout->tx_start = jiffies; + spin_unlock_irqrestore(&timeout->lock, flags); +} + +static void atl_ptp_tx_timeout_update(struct atl_ptp *ptp) +{ + if (!atl_ptp_skb_buf_len(&ptp->skb_ring)) { + struct ptp_tx_timeout *timeout = &ptp->ptp_tx_timeout; + unsigned long flags; + + spin_lock_irqsave(&timeout->lock, flags); + timeout->active = false; + spin_unlock_irqrestore(&timeout->lock, flags); + } +} + +static void atl_ptp_tx_timeout_check(struct atl_ptp *ptp) +{ + struct ptp_tx_timeout *timeout = &ptp->ptp_tx_timeout; + struct atl_nic *nic = ptp->nic; + unsigned long flags; + bool timeout_flag; + + timeout_flag = false; + + spin_lock_irqsave(&timeout->lock, flags); + if (timeout->active) { + timeout_flag = time_is_before_jiffies(timeout->tx_start + + ATL_PTP_TX_TIMEOUT); + /* reset active flag if timeout detected */ + if (timeout_flag) + timeout->active = false; + } + spin_unlock_irqrestore(&timeout->lock, flags); + + if (timeout_flag) { + atl_nic_err("PTP Timeout. Clearing Tx Timestamp SKBs\n"); + atl_ptp_skb_ring_clean(&ptp->skb_ring); + } +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) +/* atl_ptp_adjfine + * @ptp_info: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. + */ +static int atl_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) +{ + struct atl_ptp *ptp = container_of(ptp_info, struct atl_ptp, ptp_info); + struct atl_nic *nic = ptp->nic; + + hw_atl_adj_clock_freq(&nic->hw, scaled_ppm_to_ppb(scaled_ppm)); + + return 0; +} +#endif + +/* atl_ptp_adjtime + * @ptp_info: the ptp clock structure + * @delta: offset to adjust the cycle counter by + * + * adjust the timer by resetting the timecounter structure. + */ +static int atl_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) +{ + struct atl_ptp *ptp = container_of(ptp_info, struct atl_ptp, ptp_info); + struct atl_nic *nic = ptp->nic; + unsigned long flags; + + spin_lock_irqsave(&ptp->ptp_lock, flags); + hw_atl_adj_sys_clock(&nic->hw, delta); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); + + return 0; +} + +/* atl_ptp_gettime + * @ptp_info: the ptp clock structure + * @ts: timespec structure to hold the current time value + * + * read the timecounter and return the correct value on ns, + * after converting it into a struct timespec. 
+ */ +static int atl_ptp_gettime(struct ptp_clock_info *ptp_info, struct timespec64 *ts) +{ + struct atl_ptp *ptp = container_of(ptp_info, struct atl_ptp, ptp_info); + struct atl_nic *nic = ptp->nic; + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&ptp->ptp_lock, flags); + hw_atl_get_ptp_ts(&nic->hw, &ns); + spin_unlock_irqrestore(&ptp->ptp_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/* atl_ptp_settime + * @ptp_info: the ptp clock structure + * @ts: the timespec containing the new time for the cycle counter + * + * reset the timecounter to use a new base value instead of the kernel + * wall timer value. + */ +static int atl_ptp_settime(struct ptp_clock_info *ptp_info, + const struct timespec64 *ts) +{ + struct atl_ptp *ptp = container_of(ptp_info, struct atl_ptp, ptp_info); + struct atl_nic *nic = ptp->nic; + unsigned long flags; + u64 ns = timespec64_to_ns(ts); + u64 now; + + spin_lock_irqsave(&ptp->ptp_lock, flags); + hw_atl_get_ptp_ts(&nic->hw, &now); + hw_atl_adj_sys_clock(&nic->hw, (s64)ns - (s64)now); + + spin_unlock_irqrestore(&ptp->ptp_lock, flags); + + return 0; +} + +static void atl_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamp, + u64 timestamp) +{ + memset(hwtstamp, 0, sizeof(*hwtstamp)); + hwtstamp->hwtstamp = ns_to_ktime(timestamp); +} + +static int atl_ptp_hw_pin_conf(struct atl_nic *nic, u32 pin_index, u64 start, + u64 period) +{ + if (period) + atl_nic_dbg("Enable GPIO %d pulsing, start time %llu, period %u\n", + pin_index, start, (u32)period); + else + atl_nic_dbg("Disable GPIO %d pulsing, start time %llu, period %u\n", + pin_index, start, (u32)period); + + /* Notify hardware of request to begin sending pulses. + * If period is ZERO then pulses are disabled. + */ + hw_atl_gpio_pulse(&nic->hw, pin_index, start, (u32)period); + + return 0; +} + +static int atl_ptp_perout_pin_configure(struct ptp_clock_info *ptp_clock, + struct ptp_clock_request *rq, int on) +{ + struct atl_ptp *ptp = container_of(ptp_clock, struct atl_ptp, ptp_info); + struct ptp_clock_time *t = &rq->perout.period; + struct ptp_clock_time *s = &rq->perout.start; + u32 pin_index = rq->perout.index; + struct atl_nic *nic = ptp->nic; + u64 start, period; + + /* verify the request channel is there */ + if (pin_index >= ptp_clock->n_per_out) + return -EINVAL; + + /* we cannot support periods greater + * than 4 seconds due to reg limit + */ + if (t->sec > 4 || t->sec < 0) + return -ERANGE; + + /* convert to unsigned 64b ns, + * verify we can put it in a 32b register + */ + period = on ? t->sec * NSEC_PER_SEC + t->nsec : 0; + + /* verify the value is in range supported by hardware */ + if (period > U32_MAX) + return -ERANGE; + /* convert to unsigned 64b ns */ + /* TODO convert to AQ time */ + start = on ? s->sec * NSEC_PER_SEC + s->nsec : 0; + + atl_ptp_hw_pin_conf(nic, pin_index, start, period); + + return 0; +} + +static int atl_ptp_pps_pin_configure(struct ptp_clock_info *ptp_clock, + struct ptp_clock_request *rq, int on) +{ + struct atl_ptp *ptp = container_of(ptp_clock, struct atl_ptp, ptp_info); + struct atl_nic *nic = ptp->nic; + u64 start, period; + u32 pin_index = 0; + u32 rest = 0; + + /* verify the request channel is there */ + if (pin_index >= ptp_clock->n_per_out) + return -EINVAL; + + hw_atl_get_ptp_ts(&nic->hw, &start); + div_u64_rem(start, NSEC_PER_SEC, &rest); + period = on ? NSEC_PER_SEC : 0; /* PPS - pulse per second */ + start = on ? start - rest + NSEC_PER_SEC * + (rest > 990000000LL ? 
2 : 1) : 0; + + atl_ptp_hw_pin_conf(nic, pin_index, start, period); + + return 0; +} + +static void atl_ptp_extts_pin_ctrl(struct atl_ptp *ptp) +{ + struct atl_nic *nic = ptp->nic; + u32 enable = ptp->extts_pin_enabled; + + hw_atl_extts_gpio_enable(&nic->hw, 0, enable); +} + +static int atl_ptp_extts_pin_configure(struct ptp_clock_info *ptp_clock, + struct ptp_clock_request *rq, int on) +{ + struct atl_ptp *ptp = container_of(ptp_clock, struct atl_ptp, ptp_info); + + u32 pin_index = rq->extts.index; + + if (pin_index >= ptp_clock->n_ext_ts) + return -EINVAL; + + ptp->extts_pin_enabled = !!on; + if (on) { + ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS; + cancel_delayed_work_sync(&ptp->poll_sync); + schedule_delayed_work(&ptp->poll_sync, + msecs_to_jiffies(ptp->poll_timeout_ms)); + } + + atl_ptp_extts_pin_ctrl(ptp); + return 0; +} + +/* atl_ptp_gpio_feature_enable + * @ptp: the ptp clock structure + * @rq: the requested feature to change + * @on: whether to enable or disable the feature + */ +static int atl_ptp_gpio_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + return atl_ptp_extts_pin_configure(ptp, rq, on); + case PTP_CLK_REQ_PEROUT: + return atl_ptp_perout_pin_configure(ptp, rq, on); + case PTP_CLK_REQ_PPS: + return atl_ptp_pps_pin_configure(ptp, rq, on); + default: + return -EOPNOTSUPP; + } + + return 0; +} + +/* atl_ptp_verify + * @ptp: the ptp clock structure + * @pin: index of the pin in question + * @func: the desired function to use + * @chan: the function channel index to use + */ +static int atl_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + /* verify the requested pin is there */ + if (!ptp->pin_config || pin >= ptp->n_pins) + return -EINVAL; + + /* enforce locked channels, no changing them */ + if (chan != ptp->pin_config[pin].chan) + return -EINVAL; + + /* we want to keep the functions locked as well */ + if (func != ptp->pin_config[pin].func) + return -EINVAL; + + return 0; +} + +/* atl_ptp_rx_hwtstamp - utility function which checks for RX time stamp + * @skb: particular skb to send timestamp with + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the hwtstamps structure which + * is passed up the network stack + */ +static void atl_ptp_rx_hwtstamp(struct atl_ptp *ptp, struct sk_buff *skb, + u64 timestamp) +{ + timestamp -= atomic_read(&ptp->offset_ingress); + atl_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), timestamp); +} + +static int atl_ptp_ring_index(enum atl_ptp_queue ptp_queue) +{ + switch (ptp_queue) { + case ATL_PTPQ_PTP: + /* multi-TC is not supported in FWD driver, so tc mode is + * always set to 4 TCs (each with 8 queues) for now + */ + return PTP_4TC_RING_IDX; + case ATL_PTPQ_HWTS: + return PTP_HWTS_RING_IDX; + default: + break; + } + + WARN_ONCE(1, "Invalid ptp_queue"); + return 0; +} + +static int atl_ptp_poll(struct napi_struct *napi, int budget) +{ + struct atl_queue_vec *qvec = container_of(napi, struct atl_queue_vec, napi); + struct atl_ptp *ptp = qvec->nic->ptp; + int work_done = 0; + + /* Processing PTP TX and RX traffic */ + work_done = atl_poll_qvec(&ptp->qvec[ATL_PTPQ_PTP], budget); + + /* Processing HW_TIMESTAMP RX traffic */ + atl_clean_hwts_rx(&ptp->qvec[ATL_PTPQ_HWTS].rx, budget); + + if (work_done < budget) { + napi_complete_done(ptp->napi, work_done); + atl_intr_enable(&qvec->nic->hw, BIT(atl_qvec_intr(qvec))); + /* 
atl_set_intr_throttle(&nic->hw, qvec->idx); */ + } + + return work_done; +} + +static irqreturn_t atl_ptp_irq(int irq, void *private) +{ + struct atl_ptp *ptp = private; + int err = 0; + + if (!ptp) { + err = -EINVAL; + goto err_exit; + } + napi_schedule_irqoff(ptp->napi); + +err_exit: + return err >= 0 ? IRQ_HANDLED : IRQ_NONE; +} + +static struct ptp_clock_info atl_ptp_clock = { + .owner = THIS_MODULE, + .name = "atlantic ptp", + .max_adj = 999999999, + .n_ext_ts = 0, + .pps = 0, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) + .adjfine = atl_ptp_adjfine, +#endif + .adjtime = atl_ptp_adjtime, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + .gettime64 = atl_ptp_gettime, + .settime64 = atl_ptp_settime, +#else + .gettime = atl_ptp_gettime, + .settime = atl_ptp_settime, +#endif + .n_per_out = 0, + .enable = atl_ptp_gpio_feature_enable, + .n_pins = 0, + .verify = atl_ptp_verify, + .pin_config = NULL, +}; + +#define ptp_offset_init(__idx, __mbps, __egress, __ingress) do { \ + ptp_offset[__idx].mbps = (__mbps); \ + ptp_offset[__idx].egress = (__egress); \ + ptp_offset[__idx].ingress = (__ingress); } \ + while (0) + +static void atl_ptp_offset_init_from_fw(const struct atl_ptp_offset_info *offsets) +{ + int i; + + /* Load offsets for PTP */ + for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) { + switch (i) { + /* 100M */ + case ptp_offset_idx_100: + ptp_offset_init(i, 100, + offsets->egress_100, + offsets->ingress_100); + break; + /* 1G */ + case ptp_offset_idx_1000: + ptp_offset_init(i, 1000, + offsets->egress_1000, + offsets->ingress_1000); + break; + /* 2.5G */ + case ptp_offset_idx_2500: + ptp_offset_init(i, 2500, + offsets->egress_2500, + offsets->ingress_2500); + break; + /* 5G */ + case ptp_offset_idx_5000: + ptp_offset_init(i, 5000, + offsets->egress_5000, + offsets->ingress_5000); + break; + /* 10G */ + case ptp_offset_idx_10000: + ptp_offset_init(i, 10000, + offsets->egress_10000, + offsets->ingress_10000); + break; + } + } +} + +static void atl_ptp_offset_init(const struct atl_ptp_offset_info *offsets) +{ + memset(ptp_offset, 0, sizeof(ptp_offset)); + + atl_ptp_offset_init_from_fw(offsets); +} + +static void atl_ptp_gpio_init(struct atl_nic *nic, + struct ptp_clock_info *info, + enum atl_gpio_pin_function *gpio_pin) +{ + struct ptp_pin_desc pin_desc[MAX_PTP_GPIO_COUNT]; + u32 extts_pin_cnt = 0; + u32 out_pin_cnt = 0; + u32 i; + + memset(pin_desc, 0, sizeof(pin_desc)); + + for (i = 0; i < MAX_PTP_GPIO_COUNT - 1; i++) { + if (gpio_pin[i] == + (GPIO_PIN_FUNCTION_PTP0 + out_pin_cnt)) { + snprintf(pin_desc[out_pin_cnt].name, + sizeof(pin_desc[out_pin_cnt].name), + "AQ_GPIO%d", i); + pin_desc[out_pin_cnt].index = out_pin_cnt; + pin_desc[out_pin_cnt].chan = out_pin_cnt; + pin_desc[out_pin_cnt++].func = PTP_PF_PEROUT; + } + } + + info->n_per_out = out_pin_cnt; + + if (nic->hw.mcp.caps_ex & atl_fw2_ex_caps_phy_ctrl_ts_pin) { + extts_pin_cnt += 1; + + snprintf(pin_desc[out_pin_cnt].name, + sizeof(pin_desc[out_pin_cnt].name), + "AQ_GPIO%d", out_pin_cnt); + pin_desc[out_pin_cnt].index = out_pin_cnt; + pin_desc[out_pin_cnt].chan = 0; + pin_desc[out_pin_cnt].func = PTP_PF_EXTTS; + } + + info->n_pins = out_pin_cnt + extts_pin_cnt; + info->n_ext_ts = extts_pin_cnt; + + if (!info->n_pins) + return; + + info->pin_config = kcalloc(info->n_pins, sizeof(struct ptp_pin_desc), + GFP_KERNEL); + + if (!info->pin_config) + return; + + memcpy(info->pin_config, &pin_desc, + sizeof(struct ptp_pin_desc) * info->n_pins); +} + +/* PTP external GPIO nanoseconds count */ +static uint64_t 
atl_ptp_get_sync1588_ts(struct atl_nic *nic) +{ + u64 ts = 0; + + hw_atl_get_sync_ts(&nic->hw, &ts); + + return ts; +} + +static void atl_ptp_start_work(struct atl_ptp *ptp) +{ + if (ptp->extts_pin_enabled) { + ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS; + ptp->last_sync1588_ts = atl_ptp_get_sync1588_ts(ptp->nic); + schedule_delayed_work(&ptp->poll_sync, + msecs_to_jiffies(ptp->poll_timeout_ms)); + } +} + +static bool atl_ptp_sync_ts_updated(struct atl_ptp *ptp, u64 *new_ts) +{ + struct atl_nic *nic = ptp->nic; + u64 sync_ts2; + u64 sync_ts; + + sync_ts = atl_ptp_get_sync1588_ts(nic); + + if (sync_ts != ptp->last_sync1588_ts) { + sync_ts2 = atl_ptp_get_sync1588_ts(nic); + if (sync_ts != sync_ts2) { + sync_ts = sync_ts2; + sync_ts2 = atl_ptp_get_sync1588_ts(nic); + if (sync_ts != sync_ts2) { + atl_nic_err("%s: Unable to get correct GPIO TS", + __func__); + sync_ts = 0; + } + } + + *new_ts = sync_ts; + return true; + } + return false; +} + +static int atl_ptp_check_sync1588(struct atl_ptp *ptp) +{ + struct atl_nic *nic = ptp->nic; + u64 sync_ts; + + /* Sync1588 pin was triggered */ + if (atl_ptp_sync_ts_updated(ptp, &sync_ts)) { + if (ptp->extts_pin_enabled) { + struct ptp_clock_event ptp_event; + u64 time = 0; + + hw_atl_ts_to_sys_clock(&nic->hw, sync_ts, &time); + ptp_event.index = ptp->ptp_info.n_pins - 1; + ptp_event.timestamp = time; + + ptp_event.type = PTP_CLOCK_EXTTS; + ptp_clock_event(ptp->ptp_clock, &ptp_event); + } + + ptp->last_sync1588_ts = sync_ts; + } + + return 0; +} + +static void atl_ptp_poll_sync_work_cb(struct work_struct *w) +{ + struct delayed_work *dw = to_delayed_work(w); + struct atl_ptp *ptp = container_of(dw, struct atl_ptp, poll_sync); + + atl_ptp_check_sync1588(ptp); + + if (ptp->extts_pin_enabled) { + unsigned long timeout = msecs_to_jiffies(ptp->poll_timeout_ms); + + schedule_delayed_work(&ptp->poll_sync, timeout); + } +} + +#endif /* IS_REACHABLE(CONFIG_PTP_1588_CLOCK) */ + +void atl_ptp_tm_offset_set(struct atl_nic *nic, unsigned int mbps) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + int i, egress, ingress; + + if (!ptp) + return; + + egress = 0; + ingress = 0; + + for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) { + if (mbps == ptp_offset[i].mbps) { + egress = ptp_offset[i].egress; + ingress = ptp_offset[i].ingress; + break; + } + } + + atomic_set(&ptp->offset_egress, egress); + atomic_set(&ptp->offset_ingress, ingress); +#endif +} + +/* atl_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the hwtstamps structure which + * is passed up the network stack + */ +void atl_ptp_tx_hwtstamp(struct atl_nic *nic, u64 timestamp) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + struct sk_buff *skb = atl_ptp_skb_get(&ptp->skb_ring); + struct skb_shared_hwtstamps hwtstamp; + + if (!skb) { + atl_nic_err("have timestamp but tx_queues empty\n"); + return; + } + + timestamp += atomic_read(&ptp->offset_egress); + atl_ptp_convert_to_hwtstamp(&hwtstamp, timestamp); + do { + skb_tstamp_tx(skb, &hwtstamp); + dev_kfree_skb_any(skb); + skb = atl_ptp_skb_get(&ptp->skb_ring); + } while (skb); + + atl_ptp_tx_timeout_update(ptp); +#endif +} + +void atl_ptp_hwtstamp_config_get(struct atl_nic *nic, + struct hwtstamp_config *config) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + + *config = ptp->hwtstamp_config; +#endif +} + +int atl_ptp_hwtstamp_config_set(struct 
atl_nic *nic, + struct hwtstamp_config *config) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + static u32 ntuple_cmd = + ATL_NTC_PROTO | + ATL_NTC_L4_UDP | + ATL_NTC_DP | + ATL_RXF_ACT_TOHOST | + ATL_NTC_RXQ; + u32 ntuple_vec_idx = + ((ptp->qvec[ATL_PTPQ_PTP].idx << ATL_NTC_RXQ_SHIFT) & ATL_NTC_RXQ_MASK); + static u32 etype_cmd = + ETH_P_1588 | + ATL_RXF_ACT_TOHOST | + ATL_ETYPE_RXQ; + u32 etype_vec_idx = + ((ptp->qvec[ATL_PTPQ_PTP].idx << ATL_ETYPE_RXQ_SHIFT) & ATL_ETYPE_RXQ_MASK); + + if (config->tx_type == HWTSTAMP_TX_ON || + config->rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT) { + atl_write(&nic->hw, ATL_NTUPLE_DPORT(ptp->udp_filter_idx), + PTP_EV_PORT); + atl_write(&nic->hw, ATL_NTUPLE_CTRL(ptp->udp_filter_idx), + ATL_NTC_EN | ntuple_cmd | ntuple_vec_idx); + + atl_write(&nic->hw, ATL_RX_ETYPE_FLT(ptp->eth_type_filter_idx), + ATL_ETYPE_EN | etype_cmd | etype_vec_idx); + + nic->hw.link_state.ptp_datapath_up = true; + } else { + atl_write(&nic->hw, ATL_NTUPLE_CTRL(ptp->udp_filter_idx), ntuple_cmd); + + atl_write(&nic->hw, ATL_RX_ETYPE_FLT(ptp->eth_type_filter_idx), 0); + + nic->hw.link_state.ptp_datapath_up = false; + } + + ptp->hwtstamp_config = *config; +#endif + + return 0; +} + +int atl_ptp_qvec_intr(struct atl_queue_vec *qvec) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + int i; + + for (i = 0; i != ATL_PTPQ_NUM; i++) { + if (qvec->idx == atl_ptp_ring_index(i)) + return ATL_NUM_NON_RING_IRQS - 1; + } +#endif + + WARN_ONCE(1, "Not a PTP queue vector"); + return ATL_NUM_NON_RING_IRQS; +} + +bool atl_is_ptp_ring(struct atl_nic *nic, struct atl_desc_ring *ring) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + + if (!ptp) + return false; + + return &ptp->qvec[ATL_PTPQ_PTP].tx == ring || + &ptp->qvec[ATL_PTPQ_PTP].rx == ring || + &ptp->qvec[ATL_PTPQ_HWTS].rx == ring; +#else + return false; +#endif +} + +u16 atl_ptp_extract_ts(struct atl_nic *nic, struct sk_buff *skb, u8 *p, + unsigned int len) +{ + u16 ret = 0; +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + u64 timestamp = 0; + + ret = hw_atl_rx_extract_ts(&nic->hw, p, len, ×tamp); + if (ret > 0) + atl_ptp_rx_hwtstamp(ptp, skb, timestamp); +#endif + + return ret; +} + +netdev_tx_t atl_ptp_start_xmit(struct atl_nic *nic, struct sk_buff *skb) +{ + netdev_tx_t err = NETDEV_TX_OK; +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + struct atl_desc_ring *ring; + unsigned long irq_flags; + + ring = &ptp->qvec[ATL_PTPQ_PTP].tx; + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + goto err_exit; + } + + if (atl_tx_full(ring, skb_shinfo(skb)->nr_frags + 4)) { + /* Drop packet because it doesn't make sence to delay it */ + dev_kfree_skb_any(skb); + goto err_exit; + } + + err = atl_ptp_skb_put(&ptp->skb_ring, skb); + if (err) { + atl_nic_err("SKB Ring is overflow!\n"); + return NETDEV_TX_BUSY; + } + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + atl_ptp_tx_timeout_start(ptp); + skb_tx_timestamp(skb); + + spin_lock_irqsave(&ptp->ptp_ring_lock, irq_flags); + err = atl_map_skb(skb, ring); + spin_unlock_irqrestore(&ptp->ptp_ring_lock, irq_flags); + +err_exit: +#endif + return err; +} + +void atl_ptp_work(struct atl_nic *nic) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + + if (!ptp) + return; + + atl_ptp_tx_timeout_check(ptp); +#endif +} + +int atl_ptp_irq_alloc(struct atl_nic *nic) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct pci_dev *pdev = nic->hw.pdev; + struct atl_ptp *ptp = nic->ptp; + + 
if (!ptp) + return 0; + + if (nic->flags & ATL_FL_MULTIPLE_VECTORS) { + return request_irq(pci_irq_vector(pdev, ptp->idx_vector), + atl_ptp_irq, 0, nic->ndev->name, ptp); + } + + return -EINVAL; +#else + return 0; +#endif +} + +void atl_ptp_irq_free(struct atl_nic *nic) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_hw *hw = &nic->hw; + struct atl_ptp *ptp = nic->ptp; + + if (!ptp) + return; + + atl_intr_disable(hw, BIT(ptp->idx_vector)); + free_irq(pci_irq_vector(hw->pdev, ptp->idx_vector), ptp); +#endif +} + +int atl_ptp_ring_alloc(struct atl_nic *nic) +{ + int err = 0; +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + struct atl_queue_vec *qvec; + int i; + + if (!ptp) + return 0; + + for (i = 0; i != ATL_PTPQ_NUM; i++) { + qvec = &ptp->qvec[i]; + + atl_init_qvec(nic, qvec, atl_ptp_ring_index(i)); + } + + atl_for_each_ptp_qvec(ptp, qvec) { + err = atl_alloc_qvec(qvec); + if (err) + goto free; + } + + err = atl_ptp_skb_ring_init(&ptp->skb_ring, nic->requested_rx_size); + if (err != 0) { + err = -ENOMEM; + goto free; + } + + return 0; + +free: + while (--qvec >= &ptp->qvec[0]) + atl_free_qvec(qvec); +#endif + + return err; +} + +int atl_ptp_ring_start(struct atl_nic *nic) +{ + int err = 0; +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + struct atl_queue_vec *qvec; + + if (!ptp) + return 0; + + atl_for_each_ptp_qvec(ptp, qvec) { + err = atl_start_qvec(qvec); + if (err) + goto stop; + } + + netif_napi_add(nic->ndev, ptp->napi, atl_ptp_poll, 64); + napi_enable(ptp->napi); + + return 0; + +stop: + while (--qvec >= &ptp->qvec[0]) + atl_stop_qvec(qvec); +#endif + + return err; +} + +void atl_ptp_ring_stop(struct atl_nic *nic) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + struct atl_queue_vec *qvec; + + if (!ptp) + return; + + napi_disable(ptp->napi); + netif_napi_del(ptp->napi); + + atl_for_each_ptp_qvec(ptp, qvec) + atl_stop_qvec(qvec); +#endif +} + +void atl_ptp_ring_free(struct atl_nic *nic) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + + if (!ptp) + return; + + atl_free_qvec(&ptp->qvec[ATL_PTPQ_PTP]); + + atl_ptp_skb_ring_release(&ptp->skb_ring); +#endif +} + +void atl_ptp_clock_init(struct atl_nic *nic) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + atl_ptp_settime(&ptp->ptp_info, &ts); +#endif +} + +int atl_ptp_init(struct atl_nic *nic) +{ + int err = 0; +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_mcp *mcp = &nic->hw.mcp; + struct atl_ptp *ptp; + + if (!mcp->ops->set_ptp) { + nic->ptp = NULL; + return 0; + } + + if (!(mcp->caps_ex & atl_fw2_ex_caps_phy_ptp_en)) { + nic->ptp = NULL; + return 0; + } + + ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); + if (!ptp) { + err = -ENOMEM; + goto err_exit; + } + + ptp->nic = nic; + + ptp->qvec[ATL_PTPQ_PTP].type = ATL_QUEUE_PTP; + ptp->qvec[ATL_PTPQ_HWTS].type = ATL_QUEUE_HWTS; + + spin_lock_init(&ptp->ptp_lock); + spin_lock_init(&ptp->ptp_ring_lock); + + atl_ptp_tx_timeout_init(&ptp->ptp_tx_timeout); + + atomic_set(&ptp->offset_egress, 0); + atomic_set(&ptp->offset_ingress, 0); + + ptp->napi = &ptp->qvec[ATL_PTPQ_PTP].napi; + + ptp->idx_vector = ATL_IRQ_PTP; + + nic->ptp = ptp; + + /* enable ptp counter */ + nic->hw.link_state.ptp_available = true; + mcp->ops->set_ptp(&nic->hw, true); + atl_ptp_clock_init(nic); + + INIT_DELAYED_WORK(&ptp->poll_sync, &atl_ptp_poll_sync_work_cb); + ptp->eth_type_filter_idx = 
atl_reserve_filter(ATL_RXF_ETYPE); + ptp->udp_filter_idx = atl_reserve_filter(ATL_RXF_NTUPLE); + + return 0; + +err_exit: + if (ptp) + kfree(ptp->ptp_info.pin_config); + kfree(ptp); + nic->ptp = NULL; +#endif + + return err; +} + +int atl_ptp_register(struct atl_nic *nic) +{ + int err = 0; + +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp_offset_info ptp_offset_info; + enum atl_gpio_pin_function gpio_pin[3]; + struct atl_mcp *mcp = &nic->hw.mcp; + struct atl_ptp *ptp = nic->ptp; + struct ptp_clock *clock; + + if (!ptp) + return 0; + + err = atl_read_mcp_mem(&nic->hw, mcp->fw_stat_addr + atl_fw2_stat_ptp_offset, + &ptp_offset_info, sizeof(ptp_offset_info)); + if (err) + return err; + + err = atl_read_mcp_mem(&nic->hw, mcp->fw_stat_addr + atl_fw2_stat_gpio_pin, + &gpio_pin, sizeof(gpio_pin)); + if (err) + return err; + + atl_ptp_offset_init(&ptp_offset_info); + + ptp->ptp_info = atl_ptp_clock; + atl_ptp_gpio_init(nic, &ptp->ptp_info, &gpio_pin[0]); + clock = ptp_clock_register(&ptp->ptp_info, &nic->ndev->dev); + if (IS_ERR_OR_NULL(clock)) { + netdev_err(nic->ndev, "ptp_clock_register failed\n"); + err = PTR_ERR(clock); + goto err_exit; + } + ptp->ptp_clock = clock; + +err_exit: +#endif + + return err; +} + +void atl_ptp_unregister(struct atl_nic *nic) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + + if (!ptp) + return; + + ptp_clock_unregister(ptp->ptp_clock); +#endif +} + +void atl_ptp_free(struct atl_nic *nic) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_mcp *mcp = &nic->hw.mcp; + struct atl_ptp *ptp = nic->ptp; + + if (!ptp) + return; + + atl_release_filter(ATL_RXF_ETYPE); + atl_release_filter(ATL_RXF_NTUPLE); + + cancel_delayed_work_sync(&ptp->poll_sync); + /* disable ptp */ + mcp->ops->set_ptp(&nic->hw, false); + + kfree(ptp->ptp_info.pin_config); + + kfree(ptp); + nic->ptp = NULL; +#endif +} + +struct ptp_clock *atl_ptp_get_ptp_clock(struct atl_nic *nic) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + return nic->ptp->ptp_clock; +#else + return NULL; +#endif +} + +int atl_ptp_link_change(struct atl_nic *nic) +{ +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + struct atl_ptp *ptp = nic->ptp; + struct atl_hw *hw = &nic->hw; + + if (!ptp) + return 0; + + if (hw->mcp.ops->check_link(hw)) + atl_ptp_start_work(ptp); + else + cancel_delayed_work_sync(&ptp->poll_sync); +#endif + + return 0; +} diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ptp.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ptp.h new file mode 100644 index 0000000000000000000000000000000000000000..4449b0044e7bdd34a46033fcb6382288ac952284 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ptp.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef ATL_PTP_H +#define ATL_PTP_H + +#include + +#include "atl_compat.h" + +struct atl_nic; +struct atl_queue_vec; + +/* Common functions */ +int atl_ptp_init(struct atl_nic *nic); +int atl_ptp_register(struct atl_nic *nic); + +void atl_ptp_unregister(struct atl_nic *nic); +void atl_ptp_free(struct atl_nic *nic); + +int atl_ptp_irq_alloc(struct atl_nic *nic); +void atl_ptp_irq_free(struct atl_nic *nic); + +int atl_ptp_ring_alloc(struct atl_nic *nic); +void atl_ptp_ring_free(struct atl_nic *nic); + +int atl_ptp_ring_start(struct atl_nic *nic); +void atl_ptp_ring_stop(struct atl_nic *nic); + +void atl_ptp_work(struct atl_nic *nic); + +void atl_ptp_tm_offset_set(struct atl_nic *nic, unsigned int mbps); + +void atl_ptp_clock_init(struct atl_nic *nic); + +int atl_ptp_qvec_intr(struct atl_queue_vec *qvec); + +/* Traffic processing functions */ +netdev_tx_t atl_ptp_start_xmit(struct atl_nic *nic, struct sk_buff *skb); +void atl_ptp_tx_hwtstamp(struct atl_nic *nic, u64 timestamp); + +/* Check for PTP availability before calling! */ +void atl_ptp_hwtstamp_config_get(struct atl_nic *nic, + struct hwtstamp_config *config); +int atl_ptp_hwtstamp_config_set(struct atl_nic *nic, + struct hwtstamp_config *config); + +/* Return whether ring belongs to PTP or not*/ +bool atl_is_ptp_ring(struct atl_nic *nic, struct atl_desc_ring *ring); +u16 atl_ptp_extract_ts(struct atl_nic *nic, struct sk_buff *skb, u8 *p, + unsigned int len); + +struct ptp_clock *atl_ptp_get_ptp_clock(struct atl_nic *nic); + +int atl_ptp_link_change(struct atl_nic *nic); + +#endif /* ATL_PTP_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_qcom_ipa.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_qcom_ipa.c index f20926b0e31f21466423b80c94a3746deba1aef2..b9b594b2feb09d97f68af93cb64ce41ad4ea4a78 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_qcom_ipa.c +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_qcom_ipa.c @@ -20,6 +20,11 @@ #include "atl_fwd.h" #include "atl_qcom_ipa.h" +#include +#include +#include + +#define IPA_ETH_RX_SOFTIRQ_THRESH 16 #if ATL_FWD_API_VERSION >= 2 && IPA_ETH_API_VER >= 4 #define ATL_IPA_SUPPORT_NOTIFY @@ -452,21 +457,36 @@ int atl_ipa_moderate_event(struct ipa_eth_channel *ch, unsigned long event, return atl_fwd_set_ring_intr_mod(CH_RING(ch), min_usecs, max_usecs); } -#if IPA_ETH_API_VER >= 7 +static int atl_ipa_fwd_receive_skb(struct net_device *ndev, struct sk_buff *skb) +{ + struct atl_nic *nic = netdev_priv(ndev); + struct iphdr *ip; + + ip = (struct iphdr *)&skb->data[ETH_HLEN]; + nic->stats.rx_fwd.packets++; + nic->stats.rx_fwd.bytes += skb->len; + + skb->protocol = eth_type_trans(skb, ndev); + + /* Submit packet to network stack */ + /* If its a ping packet submit it via rx_ni else use rx */ + if (ip->protocol == IPPROTO_ICMP) { + return netif_rx_ni(skb); + } else if ((nic->stats.rx.packets % + IPA_ETH_RX_SOFTIRQ_THRESH) == 0) { + return netif_rx_ni(skb); + } else { + return atl_fwd_receive_skb(ndev, skb); + } +} + static int atl_ipa_receive_skb(struct ipa_eth_device *eth_dev, struct sk_buff *skb, bool in_napi) { return in_napi ? 
atl_fwd_napi_receive_skb(eth_dev->net_dev, skb) : - atl_fwd_receive_skb(eth_dev->net_dev, skb); + atl_ipa_fwd_receive_skb(eth_dev->net_dev, skb); } -#else -static int atl_ipa_receive_skb(struct ipa_eth_device *eth_dev, - struct sk_buff *skb) -{ - return atl_fwd_receive_skb(eth_dev->net_dev, skb); -} -#endif static int atl_ipa_transmit_skb(struct ipa_eth_device *eth_dev, struct sk_buff *skb) diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_regs.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_regs.h index 8237f566bb8829b47a708e88feb8a9e006d5e35a..1125ba299dda0dc5cb903154b116a217ea8ab07e 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_regs.h +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_regs.h @@ -74,6 +74,8 @@ enum mcp_scratchpad { FW2_LINK_RES_LOW = 29, /* 0x370 */ FW1_EFUSE_SHADOW = 30, FW2_LINK_RES_HIGH = 30, /* 0x374 */ + FW3_EXT_REQ = 31, /* 0x378 */ + FW3_EXT_RES = 32, /* 0x37c */ RBL_STS = 35, /* 0x388 */ }; @@ -100,6 +102,8 @@ enum mcp_scratchpad { /* RX @ 0x5000 */ #define ATL_RX_CTRL1 0x5000 +#define ATL_RX_SPARE_CTRL0 0x50A0 +#define ATL_RX_SPARE_CTRL1 0x50A4 #define ATL2_RX_FLT_L2_BC_TAG 0x50F0 #define ATL_RX_FLT_CTRL1 0x5100 #define ATL_RX_FLT_CTRL2 0x5104 @@ -114,13 +118,14 @@ enum mcp_scratchpad { #define ATL_RX_VLAN_FLT(idx) ATL_REG_STRIDE(0x5290, 4, idx) #define ATL_RX_ETYPE_FLT(idx) ATL_REG_STRIDE(0x5300, 4, idx) #define ATL2_RX_ETYPE_TAG(idx) ATL_REG_STRIDE(0x5340, 4, idx) -#define ATL_ETYPE_FLT_NUM 15 +#define ATL_ETYPE_FLT_NUM 16 #define ATL_NTUPLE_CTRL(idx) ATL_REG_STRIDE(0x5380, 4, idx) #define ATL_NTUPLE_SADDR(idx) ATL_REG_STRIDE(0x53b0, 4, idx) #define ATL_NTUPLE_DADDR(idx) ATL_REG_STRIDE(0x53d0, 4, idx) #define ATL_NTUPLE_SPORT(idx) ATL_REG_STRIDE(0x5400, 4, idx) #define ATL_NTUPLE_DPORT(idx) ATL_REG_STRIDE(0x5420, 4, idx) #define ATL_NTUPLE_FLT_NUM 8 +#define ATL_NTUPLE_V6_FLT_NUM 2 #define ATL_RX_FLEX_FLT_CTRL(idx) ATL_REG_STRIDE(0x5460, 0x20, idx) #define ATL_FLEX_FLT_NUM 2 #define ATL_RX_RSS_CTRL 0x54c0 diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.c index 93d99db691785e3b345b1606cca3771e012f2760..d0b1b3ca3687d05db084d0baa6315fcf428a9c77 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.c @@ -24,6 +24,7 @@ #include "atl_trace.h" #include "atl_fwdnl.h" +#include "atl_hw_ptp.h" static inline uint32_t fetch_tx_head(struct atl_desc_ring *ring) { @@ -34,7 +35,7 @@ static inline uint32_t fetch_tx_head(struct atl_desc_ring *ring) #endif } -static int tx_full(struct atl_desc_ring *ring, int needed) +int atl_tx_full(struct atl_desc_ring *ring, int needed) { struct atl_nic *nic = ring->nic; @@ -123,7 +124,7 @@ static netdev_tx_t atl_map_xmit_skb(struct sk_buff *skb, while (len > ATL_DATA_PER_TXD) { desc->len = cpu_to_le16(ATL_DATA_PER_TXD); trace_atl_tx_descr(ring->qvec->idx, idx, (u64 *)desc); - WRITE_ONCE(ring->hw.descs[idx].tx, *desc); + ring->hw.descs[idx].tx = *desc; bump_ptr(idx, ring, 1); daddr += ATL_DATA_PER_TXD; len -= ATL_DATA_PER_TXD; @@ -135,7 +136,7 @@ static netdev_tx_t atl_map_xmit_skb(struct sk_buff *skb, break; trace_atl_tx_descr(ring->qvec->idx, idx, (u64 *)desc); - WRITE_ONCE(ring->hw.descs[idx].tx, *desc); + ring->hw.descs[idx].tx = *desc; bump_ptr(idx, ring, 1); txbuf = &ring->txbufs[idx]; len = skb_frag_size(frag); @@ -153,14 +154,14 @@ static netdev_tx_t atl_map_xmit_skb(struct sk_buff *skb, desc->cmd |= tx_desc_cmd_wb; #endif trace_atl_tx_descr(ring->qvec->idx, idx, 
(u64 *)desc); - WRITE_ONCE(ring->hw.descs[idx].tx, *desc); + ring->hw.descs[idx].tx = *desc; first_buf->last = idx; bump_ptr(idx, ring, 1); ring->txbufs[idx].last = -1; ring->tail = idx; /* Stop queue if no space for another packet */ - tx_full(ring, atl_tx_free_low); + atl_tx_full(ring, atl_tx_free_low); /* Delay bumping the HW tail if another packet is pending and * there's space for it. @@ -247,11 +248,25 @@ netdev_tx_t atl_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct atl_nic *nic = netdev_priv(ndev); struct atl_desc_ring *ring = &nic->qvecs[skb->queue_mapping].tx; - unsigned int len = skb->len; - struct atl_tx_desc *desc; - struct atl_txbuf *txbuf; - uint32_t cmd_from_ctx; + if (unlikely(nic->hw.link_state.ptp_datapath_up)) { + /* Hardware adds the Timestamp for PTPv2 802.AS1 + * and PTPv2 IPv4 UDP. + * We have to push even general 320 port messages to the ptp + * queue explicitly. This is a limitation of current firmware + * and hardware PTP design of the chip. Otherwise ptp stream + * will fail to sync + */ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) || + unlikely((ip_hdr(skb)->version == 4) && + (ip_hdr(skb)->protocol == IPPROTO_UDP) && + ((udp_hdr(skb)->dest == htons(319)) || + (udp_hdr(skb)->dest == htons(320)))) || + unlikely(eth_hdr(skb)->h_proto == htons(ETH_P_1588))) + return atl_ptp_start_xmit(nic, skb); + } + + skb_tx_timestamp(skb); if (nic->priv_flags & ATL_PF_BIT(LPB_NET_DMA)) return NETDEV_TX_BUSY; @@ -263,11 +278,21 @@ netdev_tx_t atl_start_xmit(struct sk_buff *skb, struct net_device *ndev) return atlfwd_nl_xmit(skb, ndev); #endif - if (tx_full(ring, skb_shinfo(skb)->nr_frags + 4)) { + if (atl_tx_full(ring, skb_shinfo(skb)->nr_frags + 4)) { atl_update_ring_stat(ring, tx.tx_busy, 1); return NETDEV_TX_BUSY; } + return atl_map_skb(skb, ring); +} + +netdev_tx_t atl_map_skb(struct sk_buff *skb, struct atl_desc_ring *ring) +{ + unsigned int len = skb->len; + struct atl_tx_desc *desc; + struct atl_txbuf *txbuf; + uint32_t cmd_from_ctx; + txbuf = &ring->txbufs[ring->tail]; txbuf->skb = skb; @@ -378,14 +403,17 @@ static bool atl_clean_tx(struct atl_desc_ring *ring) } } while (--budget); - u64_stats_update_begin(&ring->syncp); - ring->stats.tx.bytes += bytes; - ring->stats.tx.packets += packets; - u64_stats_update_end(&ring->syncp); + if (likely(ring->qvec->type != ATL_QUEUE_PTP)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.tx.bytes += bytes; + ring->stats.tx.packets += packets; + u64_stats_update_end(&ring->syncp); + } - WRITE_ONCE(ring->head, first); + ring->head = first; - if (ring_space(ring) > atl_tx_free_high) { + if (likely(ring->qvec->type != ATL_QUEUE_PTP) && + ring_space(ring) > atl_tx_free_high) { struct net_device *ndev = nic->ndev; smp_mb(); @@ -552,10 +580,12 @@ void atl_rx_hash(struct sk_buff *skb, struct atl_rx_desc_wb *desc, static int atl_napi_receive_skb(struct atl_desc_ring *ring, struct sk_buff *skb) { + bool is_ptp_ring = atl_is_ptp_ring(ring->nic, ring); struct net_device *ndev = ring->nic->ndev; struct napi_struct *napi = &ring->qvec->napi; - skb_record_rx_queue(skb, ring->qvec->idx); + /* Send all PTP traffic to 0 queue */ + skb_record_rx_queue(skb, is_ptp_ring ? 
0 : ring->qvec->idx); skb->protocol = eth_type_trans(skb, ndev); napi_gro_receive(napi, skb); @@ -726,6 +756,26 @@ static int atl_fill_rx(struct atl_desc_ring *ring, uint32_t count, bool atomic) return ret; } +static int atl_fill_hwts_rx(struct atl_desc_ring *ring, uint32_t count, bool atomic) +{ + struct atl_rx_desc *desc; + + while (count) { + DECLARE_SCRATCH_DESC(scratch); + + desc = &DESC_PTR(ring, ring->tail, scratch)->rx; + desc->daddr = ring->hw.daddr + ring->hw.size * sizeof(*ring->hw.descs); + desc->haddr = 0; + + COMMIT_DESC(ring, ring->tail, scratch); + + bump_tail(ring, 1); + count--; + } + + return 0; +} + static inline void atl_get_rxpage(struct atl_pgref *pgref) { pgref->rxpage->mapcount++; @@ -945,6 +995,7 @@ static struct sk_buff *atl_process_rx_frag(struct atl_desc_ring *ring, struct atl_cb *atl_cb; struct atl_pgref *headref = &rxbuf->head, *dataref = &rxbuf->data; struct device *dev = &ring->nic->hw.pdev->dev; + bool is_ptp_ring = atl_is_ptp_ring(ring->nic, ring); if (unlikely(wb->rdm_err)) { if (skb && skb != (void *)-1l) @@ -965,6 +1016,10 @@ static struct sk_buff *atl_process_rx_frag(struct atl_desc_ring *ring, hdr_len = wb->hdr_len; data_len = atl_data_len(wb); + if (is_ptp_ring) { + data_len -= atl_ptp_extract_ts(ring->nic, skb, atl_buf_vaddr(dataref), + data_len); + } if (atl_rx_linear) { /* Linear skb mode. The entire packet was DMA'd into @@ -1142,6 +1197,42 @@ int atl_clean_rx(struct atl_desc_ring *ring, int budget, return packets; } +int atl_clean_hwts_rx(struct atl_desc_ring *ring, int budget) +{ + unsigned int refill_batch = + min_t(typeof(atl_rx_refill_batch), atl_rx_refill_batch, + ring->hw.size - 1); + unsigned int packets = 0; + + while (packets < budget) { + uint32_t space = ring_space(ring); + struct atl_rx_desc_hwts_wb *wb; + struct atl_rxbuf *rxbuf; + u64 ns; + DECLARE_SCRATCH_DESC(scratch); + + if (space >= refill_batch) + atl_fill_hwts_rx(ring, space, true); + + rxbuf = &ring->rxbufs[ring->head]; + + wb = &DESC_PTR(ring, ring->head, scratch)->hwts_wb; + FETCH_DESC(ring, ring->head, scratch); + + if (!wb->dd) + break; + DESC_RMB(); + + hw_atl_extract_hwts(&ring->nic->hw, wb, &ns); + atl_ptp_tx_hwtstamp(ring->nic, ns); + + bump_head(ring, 1); + packets++; + } + + return packets; +} + unsigned int atl_min_intr_delay = 10; module_param_named(min_intr_delay, atl_min_intr_delay, uint, 0644); @@ -1155,11 +1246,18 @@ static void atl_set_intr_throttle(struct atl_queue_vec *qvec) static int atl_poll(struct napi_struct *napi, int budget) { struct atl_queue_vec *qvec; + + qvec = container_of(napi, struct atl_queue_vec, napi); + + return atl_poll_qvec(qvec, budget); +} + +int atl_poll_qvec(struct atl_queue_vec *qvec, int budget) +{ struct atl_nic *nic; bool clean_done; int rx_cleaned; - qvec = container_of(napi, struct atl_queue_vec, napi); nic = qvec->nic; clean_done = atl_clean_tx(&qvec->tx); @@ -1170,9 +1268,11 @@ static int atl_poll(struct napi_struct *napi, int budget) if (!clean_done) return budget; - napi_complete_done(napi, rx_cleaned); - atl_intr_enable(&nic->hw, BIT(atl_qvec_intr(qvec))); - /* atl_set_intr_throttle(&nic->hw, qvec->idx); */ + if (likely(qvec->type != ATL_QUEUE_PTP)) { + napi_complete_done(&qvec->napi, rx_cleaned); + atl_intr_enable(&nic->hw, BIT(atl_qvec_intr(qvec))); + /* atl_set_intr_throttle(&nic->hw, qvec->idx); */ + } return rx_cleaned; } @@ -1255,6 +1355,7 @@ void atl_clear_datapath(struct atl_nic *nic) if (!test_and_clear_bit(ATL_ST_CONFIGURED, &nic->hw.state)) return; + atl_ptp_irq_free(nic); atl_free_link_intr(nic); if 
(nic->flags & ATL_FL_MULTIPLE_VECTORS) { @@ -1276,6 +1377,8 @@ void atl_clear_datapath(struct atl_nic *nic) netif_napi_del(&qvecs[i].napi); } + atl_ptp_ring_stop(nic); + kfree(to_irq_work(qvecs[0].work)); kfree(qvecs); nic->qvecs = NULL; @@ -1306,6 +1409,28 @@ static void atl_calc_affinities(struct atl_nic *nic) put_online_cpus(); } +void atl_init_qvec(struct atl_nic *nic, struct atl_queue_vec *qvec, int idx) +{ + qvec->nic = nic; + qvec->idx = idx; + + qvec->rx.hw.reg_base = ATL_RX_RING(idx); + qvec->rx.hw.size = nic->requested_rx_size; + qvec->rx.nic = nic; + qvec->rx.qvec = qvec; + + qvec->tx.hw.reg_base = ATL_TX_RING(idx); + qvec->tx.hw.size = nic->requested_tx_size; + qvec->tx.nic = nic; + qvec->tx.qvec = qvec; + + u64_stats_init(&qvec->rx.syncp); + u64_stats_init(&qvec->tx.syncp); + + if (likely(qvec->type == ATL_QUEUE_REGULAR)) + netif_napi_add(nic->ndev, &qvec->napi, atl_poll, 64); +} + int atl_setup_datapath(struct atl_nic *nic) { struct legacy_irq_work *irq_work = NULL; @@ -1337,24 +1462,16 @@ int atl_setup_datapath(struct atl_nic *nic) if (ret) goto err_link_intr; - for (i = 0; i < nvecs; i++, qvec++) { - qvec->nic = nic; - qvec->idx = i; - - qvec->rx.hw.reg_base = ATL_RX_RING(i); - qvec->rx.hw.size = nic->requested_rx_size; - qvec->rx.nic = nic; - qvec->rx.qvec = qvec; - - qvec->tx.hw.reg_base = ATL_TX_RING(i); - qvec->tx.hw.size = nic->requested_tx_size; - qvec->tx.nic = nic; - qvec->tx.qvec = qvec; + ret = atl_ptp_irq_alloc(nic); + if (ret < 0) + goto err_ptp_intr; - u64_stats_init(&qvec->rx.syncp); - u64_stats_init(&qvec->tx.syncp); + ret = atl_ptp_ring_start(nic); + if (ret < 0) + goto err_ptp_ring; - netif_napi_add(nic->ndev, &qvec->napi, atl_poll, 64); + for (i = 0; i < nvecs; i++, qvec++) { + atl_init_qvec(nic, qvec, i); if (unlikely(irq_work)) { INIT_WORK(&irq_work[i].work, atl_ring_work); @@ -1370,6 +1487,12 @@ int atl_setup_datapath(struct atl_nic *nic) set_bit(ATL_ST_CONFIGURED, &nic->hw.state); return 0; +err_ptp_ring: + atl_ptp_irq_free(nic); + +err_ptp_intr: + atl_free_link_intr(nic); + err_link_intr: kfree(irq_work); @@ -1453,34 +1576,54 @@ static void atl_free_tx_bufs(struct atl_desc_ring *ring) } } +static size_t atl_ring_extra_size(struct atl_desc_ring *ring) +{ + switch (ring->qvec->type) { + case ATL_QUEUE_REGULAR: + case ATL_QUEUE_PTP: + return 0; + case ATL_QUEUE_HWTS: + return ATL_RX_BUF_SIZE; + default: + WARN_ONCE(true, "Unknown queue type\n"); + break; + } + + return 0; +} + static void atl_free_ring(struct atl_desc_ring *ring) { + size_t extra = atl_ring_extra_size(ring); + if (ring->bufs) { vfree(ring->bufs); ring->bufs = 0; } - atl_free_descs(ring->nic, &ring->hw); + atl_free_descs(ring->nic, &ring->hw, extra); } static int atl_alloc_ring(struct atl_desc_ring *ring, size_t buf_size, char *type) { - int ret; + size_t extra = atl_ring_extra_size(ring); struct atl_nic *nic = ring->nic; int idx = ring->qvec->idx; + int ret; - ret = atl_alloc_descs(nic, &ring->hw); + ret = atl_alloc_descs(nic, &ring->hw, extra); if (ret) { atl_nic_err("Couldn't alloc %s[%d] descriptors\n", type, idx); return ret; } - ring->bufs = vzalloc(ring->hw.size * buf_size); - if (!ring->bufs) { - atl_nic_err("Couldn't alloc %s[%d] %sbufs\n", type, idx, type); - ret = -ENOMEM; - goto free; + if (likely(ring->qvec->type != ATL_QUEUE_HWTS)) { + ring->bufs = vzalloc(ring->hw.size * buf_size); + if (!ring->bufs) { + ret = -ENOMEM; + goto free; + } } ring->head = ring->tail = @@ -1529,6 +1672,9 @@ static void atl_free_qvec_intr(struct atl_queue_vec *qvec) struct atl_nic *nic = 
qvec->nic; int vector; + if (unlikely(!nic)) + return; + if (!(nic->flags & ATL_FL_MULTIPLE_VECTORS)) return; @@ -1537,38 +1683,60 @@ static void atl_free_qvec_intr(struct atl_queue_vec *qvec) free_irq(vector, &qvec->napi); } -static int atl_alloc_qvec(struct atl_queue_vec *qvec) +int atl_alloc_qvec(struct atl_queue_vec *qvec) { struct atl_txbuf *txbuf; int count = qvec->tx.hw.size; - int ret; + int ret = 0; + + switch (qvec->type) { + case ATL_QUEUE_REGULAR: + ret = atl_alloc_qvec_intr(qvec); + break; + case ATL_QUEUE_PTP: + case ATL_QUEUE_HWTS: + break; + default: + WARN_ONCE(true, "Unknown queue type\n"); + break; + } - ret = atl_alloc_qvec_intr(qvec); if (ret) return ret; - ret = atl_alloc_ring(&qvec->tx, sizeof(struct atl_txbuf), "tx"); - if (ret) - goto free_irq; + if (likely(qvec->type != ATL_QUEUE_HWTS)) { + ret = atl_alloc_ring(&qvec->tx, sizeof(struct atl_txbuf), "tx"); + if (ret) + goto free_irq; + } ret = atl_alloc_ring(&qvec->rx, sizeof(struct atl_rxbuf), "rx"); if (ret) goto free_tx; - for (txbuf = qvec->tx.txbufs; count; count--) - (txbuf++)->last = -1; + if (likely(qvec->type != ATL_QUEUE_HWTS)) { + for (txbuf = qvec->tx.txbufs; count; count--) + (txbuf++)->last = -1; + } return 0; free_tx: - atl_free_ring(&qvec->tx); + if (likely(qvec->type != ATL_QUEUE_HWTS)) + atl_free_ring(&qvec->tx); free_irq: - atl_free_qvec_intr(qvec); + switch (qvec->type) { + case ATL_QUEUE_REGULAR: + atl_free_qvec_intr(qvec); + break; + default: + break; + } return ret; } -static void atl_free_qvec(struct atl_queue_vec *qvec) +void atl_free_qvec(struct atl_queue_vec *qvec) { struct atl_desc_ring *rx = &qvec->rx; struct atl_desc_ring *tx = &qvec->tx; @@ -1576,8 +1744,20 @@ static void atl_free_qvec(struct atl_queue_vec *qvec) atl_free_rx_bufs(rx); atl_free_ring(rx); - atl_free_ring(tx); - atl_free_qvec_intr(qvec); + if (likely(qvec->type != ATL_QUEUE_HWTS)) + atl_free_ring(tx); + + switch (qvec->type) { + case ATL_QUEUE_REGULAR: + atl_free_qvec_intr(qvec); + break; + case ATL_QUEUE_PTP: + case ATL_QUEUE_HWTS: + break; + default: + WARN_ONCE(true, "Unknown queue type\n"); + break; + } } int atl_alloc_rings(struct atl_nic *nic) @@ -1655,22 +1835,36 @@ int atl_init_rx_ring(struct atl_desc_ring *rx) if (rx->head > 0x1FFF) return -EIO; - ret = atl_fill_rx(rx, ring_space(rx), false); + switch (rx->qvec->type) { + case ATL_QUEUE_HWTS: + ret = atl_fill_hwts_rx(rx, ring_space(rx), false); + break; + case ATL_QUEUE_PTP: + case ATL_QUEUE_REGULAR: + ret = atl_fill_rx(rx, ring_space(rx), false); + break; + default: + WARN_ONCE(true, "Unknown queue type\n"); + break; + } + if (ret) return ret; - rx->next_to_recycle = rx->tail; - /* rxbuf at ->next_to_recycle is always kept empty so that - * atl_maybe_recycle_rxbuf() always have a spot to recycle into - * without overwriting a pgref to an already allocated page, - * leaking memory. It's also the guard element in the ring - * that keeps ->tail from overrunning ->head. If it's nonempty - * on ring init (e.g. after a sleep-wake cycle) just release - * the pages. - */ - rxbuf = &rx->rxbufs[rx->next_to_recycle]; - atl_put_rxpage(&rxbuf->head, &hw->pdev->dev); - atl_put_rxpage(&rxbuf->data, &hw->pdev->dev); + if (likely(rx->qvec->type != ATL_QUEUE_HWTS)) { + rx->next_to_recycle = rx->tail; + /* rxbuf at ->next_to_recycle is always kept empty so that + * atl_maybe_recycle_rxbuf() always have a spot to recycle into + * without overwriting a pgref to an already allocated page, + * leaking memory. 
It's also the guard element in the ring + * that keeps ->tail from overrunning ->head. If it's nonempty + * on ring init (e.g. after a sleep-wake cycle) just release + * the pages. + */ + rxbuf = &rx->rxbufs[rx->next_to_recycle]; + atl_put_rxpage(&rxbuf->head, &hw->pdev->dev); + atl_put_rxpage(&rxbuf->data, &hw->pdev->dev); + } return 0; } @@ -1696,17 +1890,39 @@ static void atl_start_rx_ring(struct atl_desc_ring *ring) atl_write(hw, ATL_RING_BASE_MSW(ring), upper_32_bits(ring->hw.daddr)); atl_write(hw, ATL_RX_RING_TAIL(ring), ring->tail); - atl_write(hw, ATL_RX_RING_BUF_SIZE(ring), - (ATL_RX_HDR_SIZE / 64) << 8 | ATL_RX_BUF_SIZE / 1024); + switch (ring->qvec->type) { + case ATL_QUEUE_REGULAR: + atl_write(hw, ATL_RX_RING_BUF_SIZE(ring), + (ATL_RX_HDR_SIZE / 64) << 8 | ATL_RX_BUF_SIZE / 1024); + break; + case ATL_QUEUE_PTP: + case ATL_QUEUE_HWTS: + atl_write(hw, ATL_RX_RING_BUF_SIZE(ring), + ATL_RX_BUF_SIZE / 1024); + break; + default: + WARN_ONCE(true, "Unknown queue type\n"); + break; + } atl_write(hw, ATL_RX_RING_THRESH(ring), 8 << 0x10 | 24 << 0x18); /* LRO */ atl_write_bits(hw, ATL_RX_LRO_PKT_LIM(idx), (idx & 7) * 4, 2, 3); - /* Enable ring | VLAN offload | header split in non-linear mode */ - rx_ctl = BIT(31) | BIT(29) | ring->hw.size | - (atl_rx_linear ? 0 : BIT(28)); + /* Enable ring | VLAN offload */ + rx_ctl = BIT(31) | BIT(29) | ring->hw.size; + switch (ring->qvec->type) { + case ATL_QUEUE_REGULAR: + /* Enable header split in non-linear mode */ + rx_ctl |= (atl_rx_linear ? 0 : BIT(28)); + break; + case ATL_QUEUE_PTP: + case ATL_QUEUE_HWTS: + break; + default: + break; + } atl_write(hw, ATL_RX_RING_CTL(ring), rx_ctl); } @@ -1722,12 +1938,23 @@ static void atl_start_tx_ring(struct atl_desc_ring *ring) atl_write(hw, ATL_TX_LSO_CTRL, BIT(nic->nvecs) - 1); atl_write(hw, ATL_TX_RING_TAIL(ring), ring->tail); - atl_write(hw, ATL_TX_RING_THRESH(ring), 8 << 8 | 8 << 0x10 | - 24 << 0x18); + switch (ring->qvec->type) { + case ATL_QUEUE_REGULAR: + atl_write(hw, ATL_TX_RING_THRESH(ring), 8 << 8 | 8 << 0x10 | + 24 << 0x18); + break; + case ATL_QUEUE_PTP: + case ATL_QUEUE_HWTS: + atl_write(hw, ATL_TX_RING_THRESH(ring), 0); + break; + default: + WARN_ONCE(true, "Unknown queue type\n"); + break; + } atl_write(hw, ATL_TX_RING_CTL(ring), BIT(31) | ring->hw.size); } -static int atl_start_qvec(struct atl_queue_vec *qvec) +int atl_start_qvec(struct atl_queue_vec *qvec) { struct atl_desc_ring *rx = &qvec->rx; struct atl_desc_ring *tx = &qvec->tx; @@ -1738,25 +1965,29 @@ static int atl_start_qvec(struct atl_queue_vec *qvec) ret = atl_init_rx_ring(rx); if (ret) return ret; - ret = atl_init_tx_ring(tx); - if (ret) - return ret; + if (likely(qvec->type != ATL_QUEUE_HWTS)) { + ret = atl_init_tx_ring(tx); + if (ret) + return ret; + } /* Map ring interrups into corresponding cause bit*/ atl_set_intr_bits(hw, qvec->idx, intr, intr); atl_set_intr_throttle(qvec); - napi_enable(&qvec->napi); + if (likely(qvec->type == ATL_QUEUE_REGULAR)) + napi_enable(&qvec->napi); atl_set_intr_mod_qvec(qvec); - atl_intr_enable(hw, BIT(atl_qvec_intr(qvec))); + atl_intr_enable(hw, BIT(intr)); - atl_start_tx_ring(tx); + if (likely(qvec->type != ATL_QUEUE_HWTS)) + atl_start_tx_ring(tx); atl_start_rx_ring(rx); return 0; } -static void atl_stop_qvec(struct atl_queue_vec *qvec) +void atl_stop_qvec(struct atl_queue_vec *qvec) { struct atl_desc_ring *rx = &qvec->rx; struct atl_desc_ring *tx = &qvec->tx; @@ -1764,16 +1995,21 @@ static void atl_stop_qvec(struct atl_queue_vec *qvec) /* Disable and reset rings */ atl_write(hw, 
ATL_RING_CTL(rx), BIT(25)); - atl_write(hw, ATL_RING_CTL(tx), BIT(25)); + if (likely(qvec->type != ATL_QUEUE_HWTS)) + atl_write(hw, ATL_RING_CTL(tx), BIT(25)); udelay(10); atl_write(hw, ATL_RING_CTL(rx), 0); - atl_write(hw, ATL_RING_CTL(tx), 0); + if (likely(qvec->type != ATL_QUEUE_HWTS)) + atl_write(hw, ATL_RING_CTL(tx), 0); atl_intr_disable(hw, BIT(atl_qvec_intr(qvec))); - napi_disable(&qvec->napi); + if (likely(qvec->type == ATL_QUEUE_REGULAR)) + napi_disable(&qvec->napi); - atl_clear_rx_bufs(rx); - atl_free_tx_bufs(tx); + if (likely(qvec->type != ATL_QUEUE_HWTS)) { + atl_clear_rx_bufs(rx); + atl_free_tx_bufs(tx); + } } static void atl_set_lro(struct atl_nic *nic) diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.h index 88e0d0ca6838b4ff271271b803af8d4a3da5bfe9..e35bab18a647429487ff0812fa7a49b4011d95ab 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.h @@ -17,6 +17,7 @@ #include "atl_common.h" #include "atl_desc.h" #include "atl_ring_desc.h" +#include "atl_ptp.h" //#define ATL_RINGS_IN_UC_MEM @@ -67,13 +68,13 @@ #define bump_tail(ring, amount) do { \ struct atl_desc_ring *__ring = (ring); \ uint32_t __ptr = READ_ONCE(__ring->tail); \ - WRITE_ONCE(__ring->tail, offset_ptr(__ptr, &__ring->hw, amount));\ + __ring->tail = offset_ptr(__ptr, &__ring->hw, amount);\ } while (0) #define bump_head(ring, amount) do { \ struct atl_desc_ring *__ring = (ring); \ uint32_t __ptr = READ_ONCE(__ring->head); \ - WRITE_ONCE(__ring->head, offset_ptr(__ptr, &__ring->hw, amount));\ + __ring->head = offset_ptr(__ptr, &__ring->hw, amount);\ } while (0) struct atl_rxpage { @@ -120,6 +121,12 @@ static inline struct legacy_irq_work *to_irq_work(struct work_struct *work) return container_of(work, struct legacy_irq_work, work); }; +enum atl_queue_type { + ATL_QUEUE_REGULAR, + ATL_QUEUE_PTP, + ATL_QUEUE_HWTS, +}; + struct ____cacheline_aligned atl_queue_vec { struct atl_desc_ring tx; struct atl_desc_ring rx; @@ -128,6 +135,7 @@ struct ____cacheline_aligned atl_queue_vec { unsigned idx; char name[IFNAMSIZ + 10]; cpumask_t affinity_hint; + enum atl_queue_type type; struct work_struct *work; }; @@ -140,8 +148,20 @@ static inline struct atl_hw *ring_hw(struct atl_desc_ring *ring) return &ring->nic->hw; } +void atl_init_qvec(struct atl_nic *nic, struct atl_queue_vec *qvec, int idx); +int atl_alloc_qvec(struct atl_queue_vec *qvec); +void atl_free_qvec(struct atl_queue_vec *qvec); +int atl_start_qvec(struct atl_queue_vec *qvec); +void atl_stop_qvec(struct atl_queue_vec *qvec); +int atl_poll_qvec(struct atl_queue_vec *qvec, int budget); + static inline int atl_qvec_intr(struct atl_queue_vec *qvec) { +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + if (unlikely(qvec->idx >= qvec->nic->nvecs)) + return atl_ptp_qvec_intr(qvec); +#endif + return qvec->idx + ATL_NUM_NON_RING_IRQS; } @@ -169,10 +189,14 @@ do { \ int atl_init_rx_ring(struct atl_desc_ring *rx); int atl_init_tx_ring(struct atl_desc_ring *tx); +netdev_tx_t atl_map_skb(struct sk_buff *skb, struct atl_desc_ring *ring); +int atl_tx_full(struct atl_desc_ring *ring, int needed); + typedef int (*rx_skb_handler_t)(struct atl_desc_ring *ring, struct sk_buff *skb); int atl_clean_rx(struct atl_desc_ring *ring, int budget, rx_skb_handler_t rx_skb_func); +int atl_clean_hwts_rx(struct atl_desc_ring *ring, int budget); void atl_clear_rx_bufs(struct atl_desc_ring *ring); #ifdef ATL_RINGS_IN_UC_MEM @@ -180,7 +204,7 @@ void 
atl_clear_rx_bufs(struct atl_desc_ring *ring); #define DECLARE_SCRATCH_DESC(_name) union atl_desc _name #define DESC_PTR(_ring, _idx, _scratch) (&(_scratch)) #define COMMIT_DESC(_ring, _idx, _scratch) \ - WRITE_ONCE((_ring)->hw.descs[_idx], (_scratch)) + (_ring)->hw.descs[_idx] = (_scratch) #define FETCH_DESC(_ring, _idx, _scratch) \ do { \ (_scratch) = READ_ONCE((_ring)->hw.descs[_idx]); \ diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/release_notes.txt b/drivers/net/ethernet/aquantia/atlantic-fwd/release_notes.txt index 5fac6ea6eb45f739a6a8e957fe020f5b6424bb31..0dc6a283b771ba8fea57b61f860dd2fe010a6cd8 100644 --- a/drivers/net/ethernet/aquantia/atlantic-fwd/release_notes.txt +++ b/drivers/net/ethernet/aquantia/atlantic-fwd/release_notes.txt @@ -1,9 +1,26 @@ -Version 1.1.7 +Version 1.1.10 +============== +[ATLDRV-1559] - Add support of icmp proto to the rx filters +[ATLDRV-1401] - A2: Filtering +[ATLDRV-1513] - PM runtime failure + +Version 1.1.9 ============= +[ATLDRV-1505] - FWD: New dev ids +[ATLDRV-1529] - FWD: Segmentation fault occurs for user operations +[ATLDRV-1542] - FWD: build fails on 5.8-rcX +[ATLDRV-1552] - Fwd: build fails with old gcc +[ATLDRV-1553] - Fwd: BUG message when LOCKDEP is enabled - [ATLDRV-1469] - AQ083: After suspend resume EAPOL frames is not received - [ATLDRV-1497] - Incorrect temperature is displayed in hwmon - [ATLDRV-1519] - AQ086: Configuring default ring size +Version 1.1.8 +============= +[ATLDRV-1438] - Fwd: port PTP from Linux driver + +Version 1.1.7 +============= +[ATLDRV-1469] - AQ083: After suspend resume EAPOL frames is not received +[ATLDRV-1497] - Incorrect temperature is displayed in hwmon +[ATLDRV-1519] - AQ086: Configuring default ring size Version 1.1.6 ============= diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 5e5022fa1d047be078be911bc4f6cd0631f04de7..85029d43da7589d417355e8ce102fbae27f79eef 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1250,8 +1250,12 @@ static int __alx_open(struct alx_priv *alx, bool resume) static void __alx_stop(struct alx_priv *alx) { - alx_halt(alx); alx_free_irq(alx); + + cancel_work_sync(&alx->link_check_wk); + cancel_work_sync(&alx->reset_wk); + + alx_halt(alx); alx_free_rings(alx); alx_free_napis(alx); } @@ -1863,9 +1867,6 @@ static void alx_remove(struct pci_dev *pdev) struct alx_priv *alx = pci_get_drvdata(pdev); struct alx_hw *hw = &alx->hw; - cancel_work_sync(&alx->link_check_wk); - cancel_work_sync(&alx->reset_wk); - /* restore permanent mac address */ alx_set_macaddr(hw, hw->perm_addr); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5163da01e54f8d0688cc77e38ac2c23135c24680..a189061d8f97e32101082dc622a30aa28473fba3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6827,6 +6827,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, netdev_features_t features) { struct bnxt *bp = netdev_priv(dev); + netdev_features_t vlan_features; if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) features &= ~NETIF_F_NTUPLE; @@ -6834,12 +6835,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, /* Both CTAG and STAG VLAN accelaration on the RX side have to be * turned on or off together. 
*/ - if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != - (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { + vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX); + if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX)) { if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) features &= ~(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX); - else + else if (vlan_features) features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; } @@ -8420,8 +8423,11 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) } } - if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) - dev_close(netdev); + if (result != PCI_ERS_RESULT_RECOVERED) { + if (netif_running(netdev)) + dev_close(netdev); + pci_disable_device(pdev); + } rtnl_unlock(); @@ -8432,7 +8438,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) err); /* non-fatal, continue */ } - return PCI_ERS_RESULT_RECOVERED; + return result; } /** diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index c911e69ff25f67d0402b1f06e48de7245cea8a88..5aaf7f5a23dcb66afdfd13f6c6a8f688aae1244e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -774,7 +774,6 @@ struct bnxt_vf_info { #define BNXT_VF_SPOOFCHK 0x2 #define BNXT_VF_LINK_FORCED 0x4 #define BNXT_VF_LINK_UP 0x8 - u32 func_flags; /* func cfg flags */ u32 min_tx_rate; u32 max_tx_rate; void *hwrm_cmd_req_addr; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 3c78cd1cdd6fb7ce5caddabf063816fda069aaa6..6edbbfc1709a24cab042eec3a27e5dfa83a38793 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1287,8 +1287,11 @@ static int bnxt_set_pauseparam(struct net_device *dev, if (epause->tx_pause) link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; - if (netif_running(dev)) + if (netif_running(dev)) { + mutex_lock(&bp->link_lock); rc = bnxt_hwrm_set_pause(bp); + mutex_unlock(&bp->link_lock); + } return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index c9617675f934b615b05f13e4e74c8d9f7aeeaabb..f0bc8f5246c0a9368aa5b50f598f4656a44e2a3f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -99,11 +99,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) if (old_setting == setting) return 0; - func_flags = vf->func_flags; if (setting) - func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; + func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; else - func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; + func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; /*TODO: if the driver supports VLAN filter on guest VLAN, * the spoof check should also include vlan anti-spoofing */ @@ -112,7 +111,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) req.flags = cpu_to_le32(func_flags); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { - vf->func_flags = func_flags; if (setting) vf->flags |= BNXT_VF_SPOOFCHK; else @@ -176,7 +174,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) memcpy(vf->mac_addr, mac, ETH_ALEN); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(vf->fw_fid); - req.flags = 
cpu_to_le32(vf->func_flags); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); memcpy(req.dflt_mac_addr, mac, ETH_ALEN); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -214,7 +211,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(vf->func_flags); req.dflt_vlan = cpu_to_le16(vlan_tag); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -253,7 +249,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(vf->func_flags); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); req.max_bw = cpu_to_le32(max_tx_rate); req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); @@ -349,6 +344,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp) } } + bp->pf.active_vfs = 0; kfree(bp->pf.vf); bp->pf.vf = NULL; } @@ -613,7 +609,6 @@ void bnxt_sriov_disable(struct bnxt *bp) bnxt_free_vf_resources(bp); - bp->pf.active_vfs = 0; /* Reclaim all resources for the PF. */ rtnl_lock(); bnxt_restore_pf_fw_resources(bp); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 38391230ca8604fded464552868d3b8ca44f470b..8bfa2523e25333f89028ce7cd6b02bda2fb39790 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -72,6 +72,9 @@ #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ TOTAL_DESC * DMA_DESC_SIZE) +/* Forward declarations */ +static void bcmgenet_set_rx_mode(struct net_device *dev); + static inline void bcmgenet_writel(u32 value, void __iomem *offset) { /* MIPS chips strapped for BE will automagically configure the @@ -1564,11 +1567,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) goto out; } - if (skb_padto(skb, ETH_ZLEN)) { - ret = NETDEV_TX_OK; - goto out; - } - /* Retain how many bytes will be sent on the wire, without TSB inserted * by transmit checksum offload */ @@ -1618,6 +1616,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) len_stat = (size << DMA_BUFLENGTH_SHIFT) | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); + /* Note: if we ever change from DMA_TX_APPEND_CRC below we + * will need to restore software padding of "runt" packets + */ if (!i) { len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; if (skb->ip_summed == CHECKSUM_PARTIAL) @@ -2858,6 +2859,7 @@ static void bcmgenet_netif_start(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); /* Start the network engine */ + bcmgenet_set_rx_mode(dev); bcmgenet_enable_rx_napi(priv); bcmgenet_enable_tx_napi(priv); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index bc0221eafe5c4e9a5fb169de3a442a5cf5caae87..e40d31b4052530776196104c935c90e5f4549ed6 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -18179,8 +18179,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, rtnl_lock(); - /* We probably don't have netdev yet */ - if (!netdev || !netif_running(netdev)) + /* Could be second call or maybe we don't have netdev yet */ + if (!netdev || tp->pcierr_recovery || !netif_running(netdev)) goto done; /* We needn't recover from permanent error */ 
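The bnxt_io_slot_reset() and tg3_io_error_detected() hunks above both adjust the PCI AER (Advanced Error Reporting) recovery callbacks: bnxt now propagates the real reset result (and disables the device on failure) instead of always reporting recovery, and tg3 bails out early when a recovery is already in flight. For context, a minimal sketch of how such callbacks are typically wired up is shown below. struct pci_error_handlers, pci_ers_result_t and the PCI_ERS_RESULT_* codes are real kernel API (on kernels of this vintage the state parameter is spelled enum pci_channel_state rather than pci_channel_state_t); the foo_* names and the simplified bodies are placeholders, not the actual bnxt or tg3 implementations.

/* Minimal sketch of PCI AER recovery callbacks; foo_* is a hypothetical
 * driver, not bnxt or tg3.
 */
#include <linux/pci.h>

static pci_ers_result_t foo_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	/* Quiesce the device; a permanent failure means give up,
	 * anything else asks the core for a slot reset.
	 */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t foo_io_slot_reset(struct pci_dev *pdev)
{
	/* Re-enable the device and report the real outcome, as the bnxt
	 * change above now does, so the PCI core can disconnect the
	 * device instead of pretending it recovered.
	 */
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_set_master(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void foo_io_resume(struct pci_dev *pdev)
{
	/* Restart traffic once the link is usable again. */
}

static const struct pci_error_handlers foo_err_handlers = {
	.error_detected	= foo_io_error_detected,
	.slot_reset	= foo_io_slot_reset,
	.resume		= foo_io_resume,
};

The real drivers additionally reload firmware and restore rings in ->slot_reset()/->resume(); the sketch only illustrates the control flow that the two hunks above are correcting.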
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index b01b242c2bf00430a1aac057e9a26ec384783866..4d2a996ba446098f45bbe20de34f8bb1cdf793f0 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3516,7 +3516,7 @@ static int macb_probe(struct platform_device *pdev) bp->wol = 0; if (of_get_property(np, "magic-packet", NULL)) bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; - device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); + device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); spin_lock_init(&bp->lock); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 0e13989608f19467ba18933aa9d51f3835e44c2c..6eb65b870da743241cea484ad8483cd43bdef70e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2241,8 +2241,6 @@ static int cxgb_up(struct adapter *adap) #if IS_ENABLED(CONFIG_IPV6) update_clip(adap); #endif - /* Initialize hash mac addr list*/ - INIT_LIST_HEAD(&adap->mac_hlist); return err; irq_err: @@ -2264,6 +2262,7 @@ static void cxgb_down(struct adapter *adapter) t4_sge_stop(adapter); t4_free_sge_resources(adapter); + adapter->flags &= ~FULL_INIT_DONE; } @@ -4962,6 +4961,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) (is_t5(adapter->params.chip) ? STATMODE_V(0) : T6_STATMODE_V(0))); + /* Initialize hash mac addr list */ + INIT_LIST_HEAD(&adapter->mac_hlist); + for_each_port(adapter, i) { netdev = alloc_etherdev_mq(sizeof(struct port_info), MAX_ETH_QSETS); @@ -5252,6 +5254,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) static void remove_one(struct pci_dev *pdev) { struct adapter *adapter = pci_get_drvdata(pdev); + struct hash_mac_addr *entry, *tmp; if (!adapter) { pci_release_regions(pdev); @@ -5295,6 +5298,12 @@ static void remove_one(struct pci_dev *pdev) if (adapter->num_uld || adapter->num_ofld_uld) t4_uld_mem_free(adapter); free_some_resources(adapter); + list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, + list) { + list_del(&entry->list); + kfree(entry); + } + #if IS_ENABLED(CONFIG_IPV6) t4_cleanup_clip_tbl(adapter); #endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 48970ba08bdc14623f0d15866731169e5755c7a4..de5804ddefbd01e71c09cdd842fd0edb0e5cadd4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -47,7 +47,7 @@ static int fill_match_fields(struct adapter *adap, bool next_header) { unsigned int i, j; - u32 val, mask; + __be32 val, mask; int off, err; bool found; @@ -217,7 +217,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) const struct cxgb4_next_header *next; bool found = false; unsigned int i, j; - u32 val, mask; + __be32 val, mask; int off; if (t->table[link_uhtid - 1].link_handle) { @@ -231,10 +231,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) /* Try to find matches that allow jumps to next header. 
*/ for (i = 0; next[i].jump; i++) { - if (next[i].offoff != cls->knode.sel->offoff || - next[i].shift != cls->knode.sel->offshift || - next[i].mask != cls->knode.sel->offmask || - next[i].offset != cls->knode.sel->off) + if (next[i].sel.offoff != cls->knode.sel->offoff || + next[i].sel.offshift != cls->knode.sel->offshift || + next[i].sel.offmask != cls->knode.sel->offmask || + next[i].sel.off != cls->knode.sel->off) continue; /* Found a possible candidate. Find a key that @@ -246,9 +246,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) val = cls->knode.sel->keys[j].val; mask = cls->knode.sel->keys[j].mask; - if (next[i].match_off == off && - next[i].match_val == val && - next[i].match_mask == mask) { + if (next[i].key.off == off && + next[i].key.val == val && + next[i].key.mask == mask) { found = true; break; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h index a4b99edcc3399374784ba9b99d2b51a05d6b2694..141085e159e57e9553d3141bb0af56a8668d9a2c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h @@ -38,12 +38,12 @@ struct cxgb4_match_field { int off; /* Offset from the beginning of the header to match */ /* Fill the value/mask pair in the spec if matched */ - int (*val)(struct ch_filter_specification *f, u32 val, u32 mask); + int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask); }; /* IPv4 match fields */ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.tos = (ntohl(val) >> 16) & 0x000000FF; f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF; @@ -52,7 +52,7 @@ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { u32 mask_val; u8 frag_val; @@ -74,7 +74,7 @@ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.proto = (ntohl(val) >> 16) & 0x000000FF; f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF; @@ -83,7 +83,7 @@ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[0], &val, sizeof(u32)); memcpy(&f->mask.fip[0], &mask, sizeof(u32)); @@ -92,7 +92,7 @@ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[0], &val, sizeof(u32)); memcpy(&f->mask.lip[0], &mask, sizeof(u32)); @@ -111,7 +111,7 @@ static const struct cxgb4_match_field cxgb4_ipv4_fields[] = { /* IPv6 match fields */ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.tos = (ntohl(val) >> 20) & 0x000000FF; f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF; @@ -120,7 +120,7 @@ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.proto = (ntohl(val) >> 8) & 0x000000FF; f->mask.proto = (ntohl(mask) 
>> 8) & 0x000000FF; @@ -129,7 +129,7 @@ static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[0], &val, sizeof(u32)); memcpy(&f->mask.fip[0], &mask, sizeof(u32)); @@ -138,7 +138,7 @@ static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[4], &val, sizeof(u32)); memcpy(&f->mask.fip[4], &mask, sizeof(u32)); @@ -147,7 +147,7 @@ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[8], &val, sizeof(u32)); memcpy(&f->mask.fip[8], &mask, sizeof(u32)); @@ -156,7 +156,7 @@ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[12], &val, sizeof(u32)); memcpy(&f->mask.fip[12], &mask, sizeof(u32)); @@ -165,7 +165,7 @@ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[0], &val, sizeof(u32)); memcpy(&f->mask.lip[0], &mask, sizeof(u32)); @@ -174,7 +174,7 @@ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[4], &val, sizeof(u32)); memcpy(&f->mask.lip[4], &mask, sizeof(u32)); @@ -183,7 +183,7 @@ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[8], &val, sizeof(u32)); memcpy(&f->mask.lip[8], &mask, sizeof(u32)); @@ -192,7 +192,7 @@ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[12], &val, sizeof(u32)); memcpy(&f->mask.lip[12], &mask, sizeof(u32)); @@ -216,7 +216,7 @@ static const struct cxgb4_match_field cxgb4_ipv6_fields[] = { /* TCP/UDP match */ static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.fport = ntohl(val) >> 16; f->mask.fport = ntohl(mask) >> 16; @@ -237,19 +237,13 @@ static const struct cxgb4_match_field cxgb4_udp_fields[] = { }; struct cxgb4_next_header { - unsigned int offset; /* Offset to next header */ - /* offset, shift, and mask added to offset above + /* Offset, shift, and mask added to beginning of the header * to get to next header. Useful when using a header * field's value to jump to next header such as IHL field * in IPv4 header. 
*/ - unsigned int offoff; - u32 shift; - u32 mask; - /* match criteria to make this jump */ - unsigned int match_off; - u32 match_val; - u32 match_mask; + struct tc_u32_sel sel; + struct tc_u32_key key; /* location of jump to make */ const struct cxgb4_match_field *jump; }; @@ -258,26 +252,74 @@ struct cxgb4_next_header { * IPv4 header. */ static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = { - { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF, - .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00, - .jump = cxgb4_tcp_fields }, - { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF, - .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00, - .jump = cxgb4_udp_fields }, - { .jump = NULL } + { + /* TCP Jump */ + .sel = { + .off = 0, + .offoff = 0, + .offshift = 6, + .offmask = cpu_to_be16(0x0f00), + }, + .key = { + .off = 8, + .val = cpu_to_be32(0x00060000), + .mask = cpu_to_be32(0x00ff0000), + }, + .jump = cxgb4_tcp_fields, + }, + { + /* UDP Jump */ + .sel = { + .off = 0, + .offoff = 0, + .offshift = 6, + .offmask = cpu_to_be16(0x0f00), + }, + .key = { + .off = 8, + .val = cpu_to_be32(0x00110000), + .mask = cpu_to_be32(0x00ff0000), + }, + .jump = cxgb4_udp_fields, + }, + { .jump = NULL }, }; /* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header * to get to transport layer header. */ static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = { - { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0, - .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000, - .jump = cxgb4_tcp_fields }, - { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0, - .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000, - .jump = cxgb4_udp_fields }, - { .jump = NULL } + { + /* TCP Jump */ + .sel = { + .off = 40, + .offoff = 0, + .offshift = 0, + .offmask = 0, + }, + .key = { + .off = 4, + .val = cpu_to_be32(0x00000600), + .mask = cpu_to_be32(0x0000ff00), + }, + .jump = cxgb4_tcp_fields, + }, + { + /* UDP Jump */ + .sel = { + .off = 40, + .offoff = 0, + .offshift = 0, + .offmask = 0, + }, + .key = { + .off = 4, + .val = cpu_to_be32(0x00001100), + .mask = cpu_to_be32(0x0000ff00), + }, + .jump = cxgb4_udp_fields, + }, + { .jump = NULL }, }; struct cxgb4_link { diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 4ef68f69b58c45322d65f414e06d068ade4ab22d..0a5c4c7da505295e0ee38550d3de7f069f84d593 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2088,7 +2088,7 @@ static noinline int t4_systim_to_hwstamp(struct adapter *adapter, hwtstamps = skb_hwtstamps(skb); memset(hwtstamps, 0, sizeof(*hwtstamps)); - hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data))); + hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data)); return RX_PTP_PKT_SUC; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 0f126ce4645f30c6f51130271dcff33ed400f33a..ecb8ef4a756fcc6274af54deb024de8218e3ad17 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3361,7 +3361,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, drv_fw = &fw_info->fw_hdr; /* Read the header of the firmware on the card */ - ret = -t4_read_flash(adap, FLASH_FW_START, + ret = t4_read_flash(adap, FLASH_FW_START, sizeof(*card_fw) / sizeof(uint32_t), (uint32_t *)card_fw, 1); if (ret == 0) { @@ -3390,8 +3390,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, 
should_install_fs_fw(adap, card_fw_usable, be32_to_cpu(fs_fw->fw_ver), be32_to_cpu(card_fw->fw_ver))) { - ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, - fw_size, 0); + ret = t4_fw_upgrade(adap, adap->mbox, fw_data, + fw_size, 0); if (ret != 0) { dev_err(adap->pdev_dev, "failed to install firmware: %d\n", ret); @@ -3422,7 +3422,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); - ret = EINVAL; + ret = -EINVAL; goto bye; } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 26ba18ea08c6af95b74b71b16eeb6e024ddb63f2..fa116f0a107dbc74c5c3012aca3b61ac9cfe41a2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -715,9 +715,6 @@ static int adapter_up(struct adapter *adapter) if (adapter->flags & USING_MSIX) name_msix_vecs(adapter); - /* Initialize hash mac addr list*/ - INIT_LIST_HEAD(&adapter->mac_hlist); - adapter->flags |= FULL_INIT_DONE; } @@ -2936,6 +2933,9 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, if (err) goto err_unmap_bar; + /* Initialize hash mac addr list */ + INIT_LIST_HEAD(&adapter->mac_hlist); + /* * Allocate our "adapter ports" and stitch everything together. */ diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 6e490fd2345dae47b687082101f12bd2523dbde3..71f0640200bc7604c26693304b5a8b61b9d60017 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -76,6 +76,7 @@ config UCC_GETH depends on QUICC_ENGINE select FSL_PQ_MDIO select PHYLIB + select FIXED_PHY ---help--- This driver supports the Gigabit Ethernet mode of the QUICC Engine, which is available on some Freescale SOCs. @@ -89,6 +90,7 @@ config GIANFAR depends on HAS_DMA select FSL_PQ_MDIO select PHYLIB + select FIXED_PHY select CRC32 ---help--- This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig index a654736237a9c250bf4ac39d6237c2bd1f154744..8fec41e571789dd3f3c991cfbf5b49df893e754a 100644 --- a/drivers/net/ethernet/freescale/dpaa/Kconfig +++ b/drivers/net/ethernet/freescale/dpaa/Kconfig @@ -2,6 +2,7 @@ menuconfig FSL_DPAA_ETH tristate "DPAA Ethernet" depends on FSL_DPAA && FSL_FMAN select PHYLIB + select FIXED_PHY select FSL_FMAN_MAC ---help--- Data Path Acceleration Architecture Ethernet driver, diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index bddf4c25ee6ea79d1808a2f0a280ba2ad18da1d4..7c2a9fd4dc1a059c60966e86338ea0faec89e981 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -45,6 +45,7 @@ #include #include #include +#include #include "ucc_geth.h" @@ -1551,11 +1552,8 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) static void ugeth_quiesce(struct ucc_geth_private *ugeth) { - /* Prevent any further xmits, plus detach the device. */ - netif_device_detach(ugeth->ndev); - - /* Wait for any current xmits to finish. */ - netif_tx_disable(ugeth->ndev); + /* Prevent any further xmits */ + netif_tx_stop_all_queues(ugeth->ndev); /* Disable the interrupt to avoid NAPI rescheduling. 
*/ disable_irq(ugeth->ug_info->uf_info.irq); @@ -1568,7 +1566,10 @@ static void ugeth_activate(struct ucc_geth_private *ugeth) { napi_enable(&ugeth->napi); enable_irq(ugeth->ug_info->uf_info.irq); - netif_device_attach(ugeth->ndev); + + /* allow to xmit again */ + netif_tx_wake_all_queues(ugeth->ndev); + __netdev_watchdog_up(ugeth->ndev); } /* Called every time the controller might need to be made diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 7d0f3cd8a002babbca711d24115a8373b06429f1..9e1e2d5b80dcaeb3b6d815c25a366bb077ccd5d7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -2770,7 +2770,7 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) struct hns_mac_cb *mac_cb; u8 addr[ETH_ALEN] = {0}; u8 port_num; - u16 mskid; + int mskid; /* promisc use vague table match with vlanid = 0 & macaddr = 0 */ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr); @@ -3142,6 +3142,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); } + + put_device(&pdev->dev); + return 0; } EXPORT_SYMBOL(hns_dsaf_roce_reset); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index 9fcf2e5e000395100a7977f02dfb3b66c02307eb..0e40d647093cd699060740c58ddebe7a51a4f972 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -54,6 +54,8 @@ #define MGMT_MSG_TIMEOUT 5000 +#define SET_FUNC_PORT_MGMT_TIMEOUT 25000 + #define mgmt_to_pfhwdev(pf_mgmt) \ container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) @@ -247,12 +249,13 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, u8 *buf_in, u16 in_size, u8 *buf_out, u16 *out_size, enum mgmt_direction_type direction, - u16 resp_msg_id) + u16 resp_msg_id, u32 timeout) { struct hinic_hwif *hwif = pf_to_mgmt->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_recv_msg *recv_msg; struct completion *recv_done; + unsigned long timeo; u16 msg_id; int err; @@ -276,8 +279,9 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, goto unlock_sync_msg; } - if (!wait_for_completion_timeout(recv_done, - msecs_to_jiffies(MGMT_MSG_TIMEOUT))) { + timeo = msecs_to_jiffies(timeout ? 
timeout : MGMT_MSG_TIMEOUT); + + if (!wait_for_completion_timeout(recv_done, timeo)) { dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id); err = -ETIMEDOUT; goto unlock_sync_msg; @@ -351,6 +355,7 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, { struct hinic_hwif *hwif = pf_to_mgmt->hwif; struct pci_dev *pdev = hwif->pdev; + u32 timeout = 0; if (sync != HINIC_MGMT_MSG_SYNC) { dev_err(&pdev->dev, "Invalid MGMT msg type\n"); @@ -362,9 +367,12 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, return -EINVAL; } + if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE) + timeout = SET_FUNC_PORT_MGMT_TIMEOUT; + return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, buf_out, out_size, MGMT_DIRECT_SEND, - MSG_NOT_RESP); + MSG_NOT_RESP, timeout); } /** diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 44c73215d0264e4398446c5f047f98b1e7192926..13ed3923c6887fd027265111f399fa863c4a0061 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -473,7 +473,6 @@ static int hinic_close(struct net_device *netdev) { struct hinic_dev *nic_dev = netdev_priv(netdev); unsigned int flags; - int err; down(&nic_dev->mgmt_lock); @@ -487,20 +486,9 @@ static int hinic_close(struct net_device *netdev) up(&nic_dev->mgmt_lock); - err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); - if (err) { - netif_err(nic_dev, drv, netdev, - "Failed to set func port state\n"); - nic_dev->flags |= (flags & HINIC_INTF_UP); - return err; - } + hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); - err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); - if (err) { - netif_err(nic_dev, drv, netdev, "Failed to set port state\n"); - nic_dev->flags |= (flags & HINIC_INTF_UP); - return err; - } + hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); free_rxqs(nic_dev); free_txqs(nic_dev); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 880d925438c17fa58ec214677c71d2e63d613f3e..b43aebfc7f5be78f20775148c471b222393864fc 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1695,7 +1695,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) } netdev->min_mtu = IBMVETH_MIN_MTU; - netdev->max_mtu = ETH_MAX_MTU; + netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH; memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 956fbb164e6fbf866c9aa2d7074d150a54d258e8..85c11dafb4cd99532e763997734a64091036cb28 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3560,12 +3560,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); break; } - dev_info(dev, "Partner protocol version is %d\n", - crq->version_exchange_rsp.version); - if (be16_to_cpu(crq->version_exchange_rsp.version) < - ibmvnic_version) - ibmvnic_version = + ibmvnic_version = be16_to_cpu(crq->version_exchange_rsp.version); + dev_info(dev, "Partner protocol version is %d\n", + ibmvnic_version); send_cap_queries(adapter); break; case QUERY_CAPABILITY_RSP: diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 3dd4aeb2706d393cd8fbf4998a7582d33b9bafcd..175681aa52607217a800c79fd5541054b19081f3 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ 
b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3169,8 +3169,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); if (skb->data_len && hdr_len == len) { switch (hw->mac_type) { + case e1000_82544: { unsigned int pull_size; - case e1000_82544: + /* Make sure we have room to chop off 4 bytes, * and that the end alignment will work out to * this hardware's requirements @@ -3191,6 +3192,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, } len = skb_headlen(skb); break; + } default: /* do nothing */ break; diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 98e68888abb1d94a5f18616d1bed78fbe4e2742f..e0ef6007c27558a0fffd04e66137f40a5fcc965d 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -596,7 +596,6 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) #define er32(reg) __er32(hw, E1000_##reg) -s32 __ew32_prepare(struct e1000_hw *hw); void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val); #define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 1ad345796e80b0c798d220a213fbc6f1937f4fea..fff55f0bed303ce413ee4a3fbe94bb442140235b 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -137,14 +137,12 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set * and try again a number of times. **/ -s32 __ew32_prepare(struct e1000_hw *hw) +static void __ew32_prepare(struct e1000_hw *hw) { s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) udelay(50); - - return i; } void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) @@ -625,11 +623,11 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) { struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_hw *hw = &adapter->hw; - s32 ret_val = __ew32_prepare(hw); + __ew32_prepare(hw); writel(i, rx_ring->tail); - if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { + if (unlikely(i != readl(rx_ring->tail))) { u32 rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); @@ -642,11 +640,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) { struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_hw *hw = &adapter->hw; - s32 ret_val = __ew32_prepare(hw); + __ew32_prepare(hw); writel(i, tx_ring->tail); - if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { + if (unlikely(i != readl(tx_ring->tail))) { u32 tctl = er32(TCTL); ew32(TCTL, tctl & ~E1000_TCTL_EN); @@ -5271,6 +5269,10 @@ static void e1000_watchdog_task(struct work_struct *work) /* oops */ break; } + if (hw->mac.type == e1000_pch_spt) { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + } } /* enable transmits in the hardware, need to do this @@ -6326,11 +6328,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 ctrl, ctrl_ext, rctl, status; - /* Runtime suspend should only enable wakeup for link changes */ - u32 wufc = runtime ? 
E1000_WUFC_LNKC : adapter->wol; + u32 ctrl, ctrl_ext, rctl, status, wufc; int retval = 0; + /* Runtime suspend should only enable wakeup for link changes */ + if (runtime) + wufc = E1000_WUFC_LNKC; + else if (device_may_wakeup(&pdev->dev)) + wufc = adapter->wol; + else + wufc = 0; + status = er32(STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -6387,7 +6395,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); } else if (hw->mac.type >= e1000_pch_lpt) { - if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) + if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) /* ULP does not support wake from unicast, multicast * or broadcast. */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 82028ce355fb15a8b0681a6c166aa4dc24a07de4..ff2be34bff39de62b5a048ae667138ccf4ebe207 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -163,7 +163,8 @@ static int igb_get_link_ksettings(struct net_device *netdev, u32 speed; u32 supported, advertising; - status = rd32(E1000_STATUS); + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(E1000_STATUS); if (hw->phy.media_type == e1000_media_type_copper) { supported = (SUPPORTED_10baseT_Half | diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 815284fe93241ef29cb941465009f07323d9c8b4..6b5662674c75ebdeda975b9ccd80fe23bac95013 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -2267,7 +2267,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) } /* Configure pause time (2 TCs per register) */ - reg = hw->fc.pause_time * 0x00010001; + reg = hw->fc.pause_time * 0x00010001U; for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index f1bfae0c41d0cd406b5eff080fd08295729755a9..3cf8b3ea43b0873a1e1261c38e267b846807bce9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -917,7 +917,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->queue_index = txr_idx; /* assign ring to adapter */ - adapter->tx_ring[txr_idx] = ring; + WRITE_ONCE(adapter->tx_ring[txr_idx], ring); /* update count and index */ txr_count--; @@ -944,7 +944,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, set_ring_xdp(ring); /* assign ring to adapter */ - adapter->xdp_ring[xdp_idx] = ring; + WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring); /* update count and index */ xdp_count--; @@ -991,7 +991,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->queue_index = rxr_idx; /* assign ring to adapter */ - adapter->rx_ring[rxr_idx] = ring; + WRITE_ONCE(adapter->rx_ring[rxr_idx], ring); /* update count and index */ rxr_count--; @@ -1020,13 +1020,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) ixgbe_for_each_ring(ring, q_vector->tx) { if (ring_is_xdp(ring)) - adapter->xdp_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL); else - adapter->tx_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL); } ixgbe_for_each_ring(ring, q_vector->rx) - 
adapter->rx_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL); adapter->q_vector[v_idx] = NULL; napi_hash_del(&q_vector->napi); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ba184287e11f3da76212f3e7038ef4fed5caf1b7..9c3fa0b5555196ecb1a82edafe96ab25bed0d95c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2274,7 +2274,8 @@ static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring, rx_buffer->page_offset ^= truesize; #else unsigned int truesize = ring_uses_build_skb(rx_ring) ? - SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : + SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : SKB_DATA_ALIGN(size); rx_buffer->page_offset += truesize; @@ -6841,7 +6842,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) } for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; + struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]); + + if (!rx_ring) + continue; non_eop_descs += rx_ring->rx_stats.non_eop_descs; alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; @@ -6860,15 +6864,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) packets = 0; /* gather some stats to the adapter struct that are per queue */ for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]); + + if (!tx_ring) + continue; restart_queue += tx_ring->tx_stats.restart_queue; tx_busy += tx_ring->tx_stats.tx_busy; bytes += tx_ring->stats.bytes; packets += tx_ring->stats.packets; } for (i = 0; i < adapter->num_xdp_queues; i++) { - struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; + struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]); + if (!xdp_ring) + continue; restart_queue += xdp_ring->tx_stats.restart_queue; tx_busy += xdp_ring->tx_stats.tx_busy; bytes += xdp_ring->stats.bytes; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 5046efdad539007b4458ed3006218fe2cbe8c948..34ae4bf6e7162ec20cefb1fc1c8985ea35a49d33 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -215,7 +215,7 @@ static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val) static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) { - u16 v; + u16 v = 0; __gm_phy_read(hw, port, reg, &v); return v; } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 7440c769b30f396a5b67e98cd46278c0139f5b74..8aecc4f4f123ca9971831a6477e14fc66588fb25 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -2715,7 +2715,7 @@ void mlx4_opreq_action(struct work_struct *work) if (err) { mlx4_err(dev, "Failed to retrieve required operation: %d\n", err); - return; + goto out; } MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 12d4b891301b6108e2b97be041bd150eb54c9539..cf9011bb6e0f16220ab0f0b8a18e4ce86c40a1b1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2503,6 +2503,7 @@ static int mlx4_allocate_default_counters(struct 
mlx4_dev *dev) if (!err || err == -ENOSPC) { priv->def_counter[port] = idx; + err = 0; } else if (err == -ENOENT) { err = 0; continue; @@ -2553,7 +2554,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (!err) *idx = get_param_l(&out_param); - + if (WARN_ON(err == -ENOSPC)) + err = -EINVAL; return err; } return __mlx4_counter_alloc(dev, idx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index c7654209668bdebbbb969af753ca54298fabdde0..6ae9a198737167675342ccff29ac6197f97ae8c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -804,6 +804,7 @@ static void cmd_work_handler(struct work_struct *work) int alloc_ret; int cmd_mode; + complete(&ent->handling); sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; down(sem); if (!ent->page_queue) { @@ -831,7 +832,6 @@ static void cmd_work_handler(struct work_struct *work) } cmd->ent_arr[ent->idx] = ent; - set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); @@ -853,6 +853,7 @@ static void cmd_work_handler(struct work_struct *work) if (ent->callback) schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); + set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); /* Skip sending command to fw if internal error */ if (pci_channel_offline(dev->pdev) || @@ -865,6 +866,10 @@ static void cmd_work_handler(struct work_struct *work) MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + /* no doorbell, no need to keep the entry */ + free_ent(cmd, ent->idx); + if (ent->callback) + free_cmd(ent); return; } @@ -918,6 +923,11 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) struct mlx5_cmd *cmd = &dev->cmd; int err; + if (!wait_for_completion_timeout(&ent->handling, timeout) && + cancel_work_sync(&ent->work)) { + ent->ret = -ECANCELED; + goto out_err; + } if (cmd->mode == CMD_MODE_POLLING || ent->polling) { wait_for_completion(&ent->done); } else if (!wait_for_completion_timeout(&ent->done, timeout)) { @@ -925,12 +935,17 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); } +out_err: err = ent->ret; if (err == -ETIMEDOUT) { mlx5_core_warn(dev, "%s(0x%x) timeout. 
Will cause a leak of a command resource\n", mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + } else if (err == -ECANCELED) { + mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", + mlx5_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); } mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, deliv_status_to_str(ent->status), ent->status); @@ -966,6 +981,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, ent->token = token; ent->polling = force_polling; + init_completion(&ent->handling); if (!callback) init_completion(&ent->done); @@ -985,6 +1001,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, err = wait_func(dev, ent); if (err == -ETIMEDOUT) goto out; + if (err == -ECANCELED) + goto out_free; ds = ent->ts2 - ent->ts1; op = MLX5_GET(mbox_in, in->first.data, opcode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 5103b82fe6c52d2cc1a77246a013deda8897217e..3c1719e9224aba03de207d02b318069d524f6652 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1550,12 +1550,11 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - int ret; - ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst, - fl6); - if (ret < 0) - return ret; + dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6, + NULL); + if (IS_ERR(dst)) + return PTR_ERR(dst); *out_ttl = ip6_dst_hoplimit(dst); @@ -1754,7 +1753,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN; struct ip_tunnel_key *tun_key = &e->tun_info.key; - struct net_device *out_dev; + struct net_device *out_dev = NULL; struct neighbour *n = NULL; struct flowi6 fl6 = {}; char *encap_header; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index d560047c0bf9eacbeb48200dfe7574e2d44cb12d..abdb444c6ed0b3d3be358ff14a7f88bd9814845b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -496,8 +496,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) { struct mlx5e_tx_wqe_info *wi; + u32 nbytes = 0; + u16 ci, npkts = 0; struct sk_buff *skb; - u16 ci; int i; while (sq->cc != sq->pc) { @@ -518,8 +519,11 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) } dev_kfree_skb_any(skb); + npkts++; + nbytes += wi->num_bytes; sq->cc += wi->num_wqebbs; } + netdev_tx_completed_queue(sq->txq, npkts, nbytes); } #ifdef CONFIG_MLX5_CORE_IPOIB diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index fad26046e159565043aa4e58f506b4ad4e08e228..96f9f267d16d4a94719aa4d892a64f9d4f3e80c7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -619,7 +619,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); if (err) - return err; + goto err_trap_register; err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core); if (err) @@ -631,6 +631,7 @@ static int 
mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err_emad_trap_set: mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); +err_trap_register: destroy_workqueue(mlxsw_core->emad_wq); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 05a2006a20b9b636ae2df6bc3315e4af35abfefd..d9cd86c67556927c6c03ca57d0fe02b3dff9d9e8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -4932,7 +4932,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, return NOTIFY_DONE; fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); - if (WARN_ON(!fib_work)) + if (!fib_work) return NOTIFY_BAD; router = container_of(nb, struct mlxsw_sp_router, fib_nb); diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index f831238d9793abe8b90b36b1e521cb3943230e2c..84b6ad76f5bca74680799b15cea6004d387e1b86 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -1075,7 +1075,7 @@ static int encx24j600_spi_probe(struct spi_device *spi) if (unlikely(ret)) { netif_err(priv, probe, ndev, "Error %d initializing card encx24j600 card\n", ret); - goto out_free; + goto out_stop; } eidled = encx24j600_read_reg(priv, EIDLED); @@ -1093,6 +1093,8 @@ static int encx24j600_spi_probe(struct spi_device *spi) out_unregister: unregister_netdev(priv->ndev); +out_stop: + kthread_stop(priv->kworker_task); out_free: free_netdev(ndev); @@ -1105,6 +1107,7 @@ static int encx24j600_spi_remove(struct spi_device *spi) struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev); unregister_netdev(priv->ndev); + kthread_stop(priv->kworker_task); free_netdev(priv->ndev); diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 2e4effa9fe456d4c0e8029136bf99cffad407dd2..beb730ff5d4217556165d725765fa246c546070e 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -561,7 +561,7 @@ static int moxart_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); unregister_netdev(ndev); - free_irq(ndev->irq, ndev); + devm_free_irq(&pdev->dev, ndev->irq, ndev); moxart_mac_free_memory(ndev); free_netdev(ndev); diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c index d5b28884e21eb10f742935df7d10ce3d4793dcab..9a6c91c9d111cdc02935588d97df6a0ef4fc33ae 100644 --- a/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -247,13 +247,15 @@ static int jazz_sonic_probe(struct platform_device *pdev) goto out; err = register_netdev(dev); if (err) - goto out1; + goto undo_probe1; printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq); return 0; -out1: +undo_probe1: + dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), + lp->descriptors, lp->descriptors_laddr); release_mem_region(dev->base_addr, SONIC_MEM_SIZE); out: free_netdev(dev); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 41d30f55c946be682dd6d790f5ab5ca9f6c88cea..6bd6c261f2ba9866adfd4a1efe80b9d5b7ce0c3f 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -845,7 +845,8 @@ static int lpc_mii_init(struct netdata_local *pldat) if (mdiobus_register(pldat->mii_bus)) goto err_out_unregister_bus; - if 
(lpc_mii_probe(pldat->ndev) != 0) + err = lpc_mii_probe(pldat->ndev); + if (err) goto err_out_unregister_bus; return 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 27ba476f761d4c2ad11094ff44516104f21a330e..4fc3468f6f38ba244c8afc8226883457fc62f835 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -396,7 +396,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; } - iids->vf_cids += vf_cids * p_mngr->vf_count; + iids->vf_cids = vf_cids; iids->tids += vf_tids * p_mngr->vf_count; DP_VERBOSE(p_hwfn, QED_MSG_ILT, diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index a2a9921b467b18ce63b7b57be4cd821dc8e3c27e..693f2a03938358b4a855ebaafb98ba2267c79987 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status) mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); } +#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90 +#define QED_VF_CHANNEL_USLEEP_DELAY 100 +#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10 +#define QED_VF_CHANNEL_MSLEEP_DELAY 25 + static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) { union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; struct ustorm_trigger_vf_zone trigger; struct ustorm_vf_zone *zone_data; - int rc = 0, time = 100; + int iter, rc = 0; zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B; @@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger)); /* When PF would be done with the response, it would write back to the - * `done' address. Poll until then. + * `done' address from a coherent DMA zone. Poll until then. */ - while ((!*done) && time) { - msleep(25); - time--; + + iter = QED_VF_CHANNEL_USLEEP_ITERATIONS; + while (!*done && iter--) { + udelay(QED_VF_CHANNEL_USLEEP_DELAY); + dma_rmb(); + } + + iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS; + while (!*done && iter--) { + msleep(QED_VF_CHANNEL_MSLEEP_DELAY); + dma_rmb(); } if (!*done) { diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index a80531b5aeccdd9bf786a46365f8b4db866af289..c132b08cefde78e4b8e8376beb54267ebc8ccf26 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -528,12 +528,14 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq); #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) #define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) #define NUM_RX_BDS_MIN 128 +#define NUM_RX_BDS_KDUMP_MIN 63 #define NUM_RX_BDS_DEF ((u16)BIT(10) - 1) #define TX_RING_SIZE_POW 13 #define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW)) #define NUM_TX_BDS_MAX (TX_RING_SIZE - 1) #define NUM_TX_BDS_MIN 128 +#define NUM_TX_BDS_KDUMP_MIN 63 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX #define QEDE_MIN_PKT_LEN 64 diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index dab202f343c622c7d0982ec0a741e34b8b018acd..8bb734486bf3c1703b064e8a8f4a782ed9c21922 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -29,6 +29,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ +#include #include #include #include @@ -624,8 +625,14 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, edev->dp_module = dp_module; edev->dp_level = dp_level; edev->ops = qed_ops; - edev->q_num_rx_buffers = NUM_RX_BDS_DEF; - edev->q_num_tx_buffers = NUM_TX_BDS_DEF; + + if (is_kdump_kernel()) { + edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN; + edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN; + } else { + edev->q_num_rx_buffers = NUM_RX_BDS_DEF; + edev->q_num_tx_buffers = NUM_TX_BDS_DEF; + } DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n", info->num_queues, info->num_queues); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 1fc84d8f891b3116cf8ee5b9735db7dd82fdabcd..aae81226a0a46baf6cee32398463e32290891aff 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3650,7 +3650,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) ahw->diag_cnt = 0; ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); if (ret) - goto fail_diag_irq; + goto fail_mbx_args; if (adapter->flags & QLCNIC_MSIX_ENABLED) intrpt_id = ahw->intr_tbl[0].id; @@ -3680,6 +3680,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) done: qlcnic_free_mbx_args(&cmd); + +fail_mbx_args: qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); fail_diag_irq: diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 056cb60936303563ac835a8a450e29566cd2bb86..8ad05e5008299065e2811774bb3110fb8276365c 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -651,10 +651,10 @@ static int rocker_dma_rings_init(struct rocker *rocker) err_dma_event_ring_bufs_alloc: rocker_dma_ring_destroy(rocker, &rocker->event_ring); err_dma_event_ring_create: + rocker_dma_cmd_ring_waits_free(rocker); +err_dma_cmd_ring_waits_alloc: rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, PCI_DMA_BIDIRECTIONAL); -err_dma_cmd_ring_waits_alloc: - rocker_dma_cmd_ring_waits_free(rocker); err_dma_cmd_ring_bufs_alloc: rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); return err; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 96ac0d3af6f5b0eb15c8636021bce42e5da80a13..f570a37c68c23eb884ad8327daba943076220a6b 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2294,7 +2294,7 @@ static int smc_drv_probe(struct platform_device *pdev) ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio, "power", 0, 0, 100); if (ret) - return ret; + goto out_free_netdev; /* * Optional reset GPIO configured? 
Minimum 100 ns reset needed @@ -2303,7 +2303,7 @@ static int smc_drv_probe(struct platform_device *pdev) ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio, "reset", 0, 0, 100); if (ret) - return ret; + goto out_free_netdev; /* * Need to wait for optional EEPROM to load, max 750 us according diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index ce4bfecc26c7aadfada60b1c2dd7b628c89a9075..ae80a223975db71b20615a4fa13d204f04653fc3 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2515,20 +2515,20 @@ static int smsc911x_drv_probe(struct platform_device *pdev) retval = smsc911x_init(dev); if (retval < 0) - goto out_disable_resources; + goto out_init_fail; netif_carrier_off(dev); retval = smsc911x_mii_init(pdev, dev); if (retval) { SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); - goto out_disable_resources; + goto out_init_fail; } retval = register_netdev(dev); if (retval) { SMSC_WARN(pdata, probe, "Error %i registering device", retval); - goto out_disable_resources; + goto out_init_fail; } else { SMSC_TRACE(pdata, probe, "Network interface: \"%s\"", dev->name); @@ -2569,9 +2569,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev) return 0; -out_disable_resources: +out_init_fail: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); +out_disable_resources: (void)smsc911x_disable_resources(pdev); out_enable_resources_fail: smsc911x_free_resources(pdev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 11a4a81b0397cd3ce02550af97f85cd0256744f4..bcc5d1e16ce2ccd8a819ea9a1ab4e45254c491e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -330,6 +330,19 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) /* Enable PTP clock */ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val); val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id); + switch (gmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) | + NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id); + break; + case PHY_INTERFACE_MODE_SGMII: + val |= NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) | + NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id); + break; + default: + /* We don't get here; the switch above will have errored out */ + unreachable(); + } regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index e1348f6c4c73e6edf6c83fbf346d1bf030f83800..a6865d64b8b0764a91d111b9ec32c57b82362f1f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "stmmac.h" #include "stmmac_platform.h" @@ -158,6 +159,13 @@ u16 dwmac_qcom_select_queue( txqueue_select = ALL_OTHER_TX_TRAFFIC_IPA_DISABLED; } + /* use better macro, cannot afford function call here */ + if (ipa_enabled && (txqueue_select == IPA_DMA_TX_CH_BE || + txqueue_select == IPA_DMA_TX_CH_CV2X)) { + ETHQOSERR("TX Channel [%d] is not a valid for SW path\n", + txqueue_select); + WARN_ON(1); + } ETHQOSDBG("tx_queue %d\n", txqueue_select); return txqueue_select; } @@ -411,8 +419,10 @@ static int qcom_ethqos_add_ipv6addr(struct ip_params *ip_info, struct net *net = dev_net(dev); /*For 
valid IPv6 address*/ - if (!net || !net->genl_sock || !net->genl_sock->sk_socket) + if (!net || !net->genl_sock || !net->genl_sock->sk_socket) { ETHQOSERR("Sock is null, unable to assign ipv6 address\n"); + return -EFAULT; + } if (!net->ipv6.devconf_dflt) { ETHQOSERR("ipv6.devconf_dflt is null, schedule wq\n"); @@ -547,7 +557,7 @@ static int qcom_ethqos_qmp_mailbox_init(struct qcom_ethqos *ethqos) ethqos->qmp_mbox_client = devm_kzalloc( ðqos->pdev->dev, sizeof(*ethqos->qmp_mbox_client), GFP_KERNEL); - if (IS_ERR(ethqos->qmp_mbox_client)) { + if (!ethqos->qmp_mbox_client || IS_ERR(ethqos->qmp_mbox_client)) { ETHQOSERR("qmp alloc client failed\n"); return -EINVAL; } @@ -1031,8 +1041,8 @@ static void ethqos_handle_phy_interrupt(struct qcom_ethqos *ethqos) struct stmmac_priv *priv = netdev_priv(dev); int micrel_intr_status = 0; - if ((dev->phydev->phy_id & dev->phydev->drv->phy_id_mask) - == MICREL_PHY_ID) { + if (dev->phydev && ((dev->phydev->phy_id & + dev->phydev->drv->phy_id_mask) == MICREL_PHY_ID)) { phy_intr_status = ethqos_mdio_read( priv, priv->plat->phy_addr, DWC_ETH_QOS_BASIC_STATUS); ETHQOSDBG( @@ -1147,15 +1157,23 @@ static ssize_t read_phy_reg_dump(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct qcom_ethqos *ethqos = file->private_data; + struct platform_device *pdev; + struct net_device *dev; + struct stmmac_priv *priv; unsigned int len = 0, buf_len = 2000; char *buf; ssize_t ret_cnt; int phydata = 0; int i = 0; - struct platform_device *pdev = ethqos->pdev; - struct net_device *dev = platform_get_drvdata(pdev); - struct stmmac_priv *priv = netdev_priv(dev); + if (!ethqos) { + ETHQOSERR("NULL Pointer\n"); + return -EINVAL; + } + + pdev = ethqos->pdev; + dev = platform_get_drvdata(pdev); + priv = netdev_priv(dev); if (!ethqos || !dev->phydev) { ETHQOSERR("NULL Pointer\n"); @@ -1195,13 +1213,20 @@ static ssize_t read_rgmii_reg_dump(struct file *file, loff_t *ppos) { struct qcom_ethqos *ethqos = file->private_data; + struct platform_device *pdev; + struct net_device *dev; unsigned int len = 0, buf_len = 2000; char *buf; ssize_t ret_cnt; int rgmii_data = 0; - struct platform_device *pdev = ethqos->pdev; - struct net_device *dev = platform_get_drvdata(pdev); + if (!ethqos) { + ETHQOSERR("NULL Pointer\n"); + return -EINVAL; + } + + pdev = ethqos->pdev; + dev = platform_get_drvdata(pdev); if (!ethqos || !dev->phydev) { ETHQOSERR("NULL Pointer\n"); @@ -1538,15 +1563,11 @@ static void setup_config_registers(struct qcom_ethqos *ethqos, if (mode > DISABLE_LOOPBACK && !qcom_ethqos_is_phy_link_up(ethqos)) { /*If Link is Down & need to enable Loopback*/ ETHQOSDBG("Link is down . 
manual ipa setting up\n"); - if (priv->tx_queue[IPA_DMA_TX_CH].skip_sw) - ethqos_ipa_offload_event_handler(priv, - EV_PHY_LINK_UP); + ethqos_ipa_offload_event_handler(priv, EV_PHY_LINK_UP); } else if (mode == DISABLE_LOOPBACK && !qcom_ethqos_is_phy_link_up(ethqos)) { ETHQOSDBG("Disable request since link was down disable ipa\n"); - if (priv->tx_queue[IPA_DMA_TX_CH].skip_sw) - ethqos_ipa_offload_event_handler(priv, - EV_PHY_LINK_DOWN); + ethqos_ipa_offload_event_handler(priv, EV_PHY_LINK_DOWN); } if (priv->dev->phydev->speed != SPEED_UNKNOWN) @@ -1609,13 +1630,11 @@ static ssize_t loopback_handling_config( } /*Argument validation*/ - if (config == DISABLE_LOOPBACK || config == ENABLE_IO_MACRO_LOOPBACK || + if (config == ENABLE_IO_MACRO_LOOPBACK || config == ENABLE_MAC_LOOPBACK || config == ENABLE_PHY_LOOPBACK) { if (speed != SPEED_1000 && speed != SPEED_100 && speed != SPEED_10) return -EINVAL; - } else { - return -EINVAL; } if (config == ethqos->current_loopback) { @@ -2324,7 +2343,8 @@ static void ethqos_set_early_eth_param( priv->plat->mdio_bus_data->phy_mask = priv->plat->mdio_bus_data->phy_mask | DUPLEX_FULL | SPEED_100; - priv->plat->max_speed = SPEED_100; + if (priv->plat) + priv->plat->max_speed = SPEED_100; if (pparams.is_valid_ipv4_addr) { INIT_DELAYED_WORK(ðqos->ipv4_addr_assign_wq, @@ -2357,12 +2377,23 @@ bool qcom_ethqos_ipa_enabled(void) static ssize_t ethqos_read_dev_emac(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { - unsigned int len = 0; - char *temp_buf; - ssize_t ret_cnt = 0; + struct eth_msg_meta msg; + u8 status = 0; - ret_cnt = simple_read_from_buffer(buf, count, f_pos, temp_buf, len); - return ret_cnt; + memset(&msg, 0, sizeof(struct eth_msg_meta)); + + if (pethqos && pethqos->ipa_enabled) + ethqos_ipa_offload_event_handler( + &status, EV_QTI_GET_CONN_STATUS); + + msg.msg_type = status; + + ETHQOSDBG("status %02x\n", status); + ETHQOSDBG("msg.msg_type %02x\n", msg.msg_type); + ETHQOSDBG("msg.rsvd %02x\n", msg.rsvd); + ETHQOSDBG("msg.msg_len %d\n", msg.msg_len); + + return copy_to_user(buf, &msg, sizeof(struct eth_msg_meta)); } static ssize_t ethqos_write_dev_emac(struct file *file, @@ -2384,6 +2415,7 @@ static ssize_t ethqos_write_dev_emac(struct file *file, ETHQOSERR("emac string is too long - count=%u\n", count); return -EFAULT; } + memset(in_buf, 0, sizeof(in_buf)); ret = copy_from_user(in_buf, user_buf, count); @@ -2484,10 +2516,42 @@ static void ethqos_get_qoe_dt(struct qcom_ethqos *ethqos, } } +static DECLARE_WAIT_QUEUE_HEAD(dev_emac_wait); +#ifdef CONFIG_ETH_IPA_OFFLOAD +void ethqos_wakeup_dev_emac_queue(void) +{ + ETHQOSDBG("\n"); + wake_up_interruptible(&dev_emac_wait); +} +#endif + +static unsigned int ethqos_poll_dev_emac(struct file *file, poll_table *wait) +{ + int mask = 0; + int update = 0; + + ETHQOSDBG("\n"); + + poll_wait(file, &dev_emac_wait, wait); + + if (pethqos && pethqos->ipa_enabled && pethqos->cv2x_mode) + ethqos_ipa_offload_event_handler( + &update, EV_QTI_CHECK_CONN_UPDATE); + + if (update) + mask = POLLIN | POLLRDNORM; + + ETHQOSDBG("mask %d\n", mask); + + return mask; +} + static const struct file_operations emac_fops = { .owner = THIS_MODULE, + .open = simple_open, .read = ethqos_read_dev_emac, .write = ethqos_write_dev_emac, + .poll = ethqos_poll_dev_emac, }; static int ethqos_create_emac_device_node(dev_t *emac_dev_t, @@ -2575,6 +2639,100 @@ static void ethqos_get_cv2x_dt(struct qcom_ethqos *ethqos, } } +inline u32 qcom_ethqos_rgmii_io_macro_num_of_regs(u32 emac_hw_version) +{ + switch (emac_hw_version) { + case 
EMAC_HW_v2_0_0: + return 27; + case EMAC_HW_v2_1_0: + return 27; + case EMAC_HW_v2_1_1: + return 27; + case EMAC_HW_v2_1_2: + return 27; + case EMAC_HW_v2_2_0: + return 27; + case EMAC_HW_v2_3_0: + return 28; + case EMAC_HW_v2_3_1: + return 27; + case EMAC_HW_v2_3_2: + return 29; + case EMAC_HW_NONE: + default: + return 0; + } +} + +static int qcom_ethos_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr) +{ + u32 size_iomacro_regs; + + if (pethqos) { + size_iomacro_regs = + qcom_ethqos_rgmii_io_macro_num_of_regs(pethqos->emac_ver) * 4; + ETHQOSINFO("pethqos 0x%p", pethqos); + + pethqos->iommu_domain = stmmac_emb_smmu_ctx.iommu_domain; + ETHQOSINFO("emac iommu domain 0x%p", pethqos->iommu_domain); + + pethqos->emac_reg_base_address = + kzalloc(pethqos->emac_mem_size, GFP_KERNEL); + ETHQOSINFO("emac register mem 0x%p", pethqos->emac_mem_base); + if (pethqos->emac_mem_base) + memcpy_fromio(pethqos->emac_reg_base_address, + pethqos->ioaddr, + pethqos->emac_mem_size); + + pethqos->rgmii_reg_base_address = + kzalloc(size_iomacro_regs, GFP_KERNEL); + ETHQOSINFO + ("rgmii register mem 0x%p", pethqos->rgmii_reg_base_address); + if (pethqos->rgmii_reg_base_address) + memcpy_fromio(pethqos->rgmii_reg_base_address, + pethqos->rgmii_base, + size_iomacro_regs); + } + return NOTIFY_DONE; +} + +static struct notifier_block qcom_ethqos_panic_blk = { + .notifier_call = qcom_ethos_panic_notifier, +}; + +static void read_mac_addr_from_fuse_reg(struct device_node *np) +{ + int ret, i; + u32 mac_efuse_prop, efuse_size = 8; + void __iomem *mac_efuse_addr; + unsigned long mac_addr; + + ret = of_property_read_u32(np, "mac-efuse-addr", &mac_efuse_prop); + if (!ret) { + mac_efuse_addr = ioremap(mac_efuse_prop, efuse_size); + if (!mac_efuse_addr) { + ETHQOSERR("unable to do ioremap\n"); + return; + } + mac_addr = readq(mac_efuse_addr); + ETHQOSINFO("Mac address read: %llx\n", mac_addr); + + /* create byte array out of value read from efuse */ + for (i = 0; i < ETH_ALEN ; i++) { + pparams.mac_addr[ETH_ALEN - 1 - i] = mac_addr & 0xff; + mac_addr = mac_addr >> 8; + } + + pparams.is_valid_mac_addr = + is_valid_ether_addr(pparams.mac_addr); + if (!pparams.is_valid_mac_addr) { + ETHQOSERR("Invalid Mac address set: %llx\n", mac_addr); + return; + } + } +} + static int qcom_ethqos_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -2607,8 +2765,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) ethqos = devm_kzalloc(&pdev->dev, sizeof(*ethqos), GFP_KERNEL); if (!ethqos) { - ret = -ENOMEM; - goto err_mem; + return -ENOMEM; } ethqos->pdev = pdev; @@ -2659,6 +2816,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev) if (ret) goto err_mem; + /* Read mac address from fuse register */ + read_mac_addr_from_fuse_reg(np); + /*Initialize Early ethernet to false*/ ethqos->early_eth_enabled = false; @@ -2769,6 +2929,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev) if (ret) goto err_clk; + atomic_notifier_chain_register(&panic_notifier_list, + &qcom_ethqos_panic_blk); rgmii_dump(ethqos); if (ethqos->emac_ver == EMAC_HW_v2_3_2) { @@ -2823,12 +2985,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev) ðqos->emac_class, "emac"); } + #ifdef CONFIG_ETH_IPA_OFFLOAD - ethqos->ipa_enabled = true; - priv->rx_queue[IPA_DMA_RX_CH].skip_sw = true; - priv->tx_queue[IPA_DMA_TX_CH].skip_sw = true; ethqos_ipa_offload_event_handler(ethqos, EV_PROBE_INIT); - priv->hw->mac->map_mtl_to_dma(priv->hw, 0, 1); //change #endif #ifdef 
CONFIG_MSM_BOOT_TIME_MARKER diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h index 3c9ae7855254fcd0d6778203602fc86a312a5d58..fe91545a7bc6ac4b08552834d83546ca4877c878 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h @@ -374,9 +374,9 @@ enum current_phy_state { #define RGMII_IO_MACRO_CONFIG_RGRD(data)\ ((data) = (readl_relaxed((RGMII_IO_MACRO_CONFIG_RGOFFADDR)))) -#define RGMII_GPIO_CFG_TX_INT_MASK (unsigned long)(0x3) +#define RGMII_GPIO_CFG_TX_INT_MASK (unsigned long)(0x7) -#define RGMII_GPIO_CFG_TX_INT_WR_MASK (unsigned long)(0xfff9ffff) +#define RGMII_GPIO_CFG_TX_INT_WR_MASK (unsigned long)(0xfff1ffff) #define RGMII_GPIO_CFG_TX_INT_UDFWR(data) do {\ unsigned long v;\ @@ -388,13 +388,13 @@ enum current_phy_state { #define RGMII_GPIO_CFG_RX_INT_MASK (unsigned long)(0x3) -#define RGMII_GPIO_CFG_RX_INT_WR_MASK (unsigned long)(0xffe7ffff) +#define RGMII_GPIO_CFG_RX_INT_WR_MASK (unsigned long)(0xFFCFFFFF) #define RGMII_GPIO_CFG_RX_INT_UDFWR(data) do {\ unsigned long v;\ RGMII_IO_MACRO_CONFIG_RGRD(v);\ v = ((v & RGMII_GPIO_CFG_RX_INT_WR_MASK) | \ - ((data & RGMII_GPIO_CFG_RX_INT_MASK) << 19));\ + ((data & RGMII_GPIO_CFG_RX_INT_MASK) << 20));\ RGMII_IO_MACRO_CONFIG_RGWR(v);\ } while (0) @@ -426,6 +426,32 @@ struct ethqos_io_macro { bool rx_dll_bypass; }; +struct ethqos_extra_dma_stats { + /* DMA status registers for all channels [0-4] */ + unsigned long dma_ch_status[MTL_MAX_TX_QUEUES]; + unsigned long dma_ch_intr_enable[MTL_MAX_TX_QUEUES]; + unsigned long dma_ch_intr_status; + unsigned long dma_debug_status0; + unsigned long dma_debug_status1; + + /* RX DMA descriptor status registers for all channels [0-4] */ + unsigned long dma_ch_rx_control[MTL_MAX_RX_QUEUES]; + unsigned long dma_ch_rxdesc_list_addr[MTL_MAX_RX_QUEUES]; + unsigned long dma_ch_rxdesc_ring_len[MTL_MAX_RX_QUEUES]; + unsigned long dma_ch_curr_app_rxdesc[MTL_MAX_RX_QUEUES]; + unsigned long dma_ch_rxdesc_tail_ptr[MTL_MAX_RX_QUEUES]; + unsigned long dma_ch_curr_app_rxbuf[MTL_MAX_RX_QUEUES]; + unsigned long dma_ch_miss_frame_count[MTL_MAX_RX_QUEUES]; + + /* TX DMA descriptors status for all channels [0-5] */ + unsigned long dma_ch_tx_control[MTL_MAX_TX_QUEUES]; + unsigned long dma_ch_txdesc_list_addr[MTL_MAX_TX_QUEUES]; + unsigned long dma_ch_txdesc_ring_len[MTL_MAX_TX_QUEUES]; + unsigned long dma_ch_curr_app_txdesc[MTL_MAX_TX_QUEUES]; + unsigned long dma_ch_txdesc_tail_ptr[MTL_MAX_TX_QUEUES]; + unsigned long dma_ch_curr_app_txbuf[MTL_MAX_TX_QUEUES]; +}; + struct qcom_ethqos { struct platform_device *pdev; void __iomem *rgmii_base; @@ -438,6 +464,10 @@ struct qcom_ethqos { unsigned int speed; unsigned int vote_idx; + struct iommu_domain *iommu_domain; + unsigned int *emac_reg_base_address; + unsigned int *rgmii_reg_base_address; + int gpio_phy_intr_redirect; u32 phy_intr; /* Work struct for handling phy interrupt */ @@ -504,6 +534,7 @@ struct qcom_ethqos { bool ipa_enabled; /* Key Performance Indicators */ bool print_kpi; + unsigned int emac_phy_off_suspend; int loopback_speed; enum loopback_mode current_loopback; @@ -527,6 +558,8 @@ struct qcom_ethqos { u32 cv2x_mode; struct ethqos_vlan_info cv2x_vlan; unsigned char cv2x_dev_addr[ETH_ALEN]; + + struct ethqos_extra_dma_stats xstats; }; struct pps_cfg { @@ -598,8 +631,6 @@ u16 dwmac_qcom_select_queue( #define PTP_UDP_EV_PORT 0x013F #define PTP_UDP_GEN_PORT 0x0140 -#define IPA_DMA_TX_CH 0 -#define IPA_DMA_RX_CH 0 #define 
CV2X_TAG_TX_CHANNEL 3 #define QMI_TAG_TX_CHANNEL 2 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ipa-offload.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ipa-offload.h index 7c3f664a372c85874837d9f8ee69318ba51f2b09..317b6681530476ef907990d24471f4ed5b9ac3e1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ipa-offload.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ipa-offload.h @@ -13,11 +13,10 @@ #ifndef _DWMAC_QCOM_ETH_IPA_OFFLOAD_H #define _DWMAC_QCOM_ETH_IPA_OFFLOAD_H -#define IPA_DMA_RX_CH 0 -#define IPA_DMA_TX_CH 0 - #define ALL_OTHER_TX_TRAFFIC_IPA_DISABLED 0 #define ALL_OTHER_TRAFFIC_TX_CHANNEL 1 +#define ETH_DEV_NAME_LEN 16 +#define ETH_DEV_ADDR_LEN 8 #define QTAG_VLAN_ETH_TYPE_OFFSET 16 #define QTAG_UCP_FIELD_OFFSET 14 @@ -35,9 +34,26 @@ buf1[QTAG_ETH_TYPE_OFFSET + 1]));\ } while (0) +enum ipa_queue_type { + IPA_QUEUE_BE = 0x0, + IPA_QUEUE_CV2X, + IPA_QUEUE_MAX, +}; + +enum ipa_intr_route_type { + IPA_INTR_ROUTE_HW = 0x0, + IPA_INTR_ROUTE_DB, + IPA_INTR_ROUTE_MAX, +}; + +#define IPA_DMA_RX_CH_BE 0 +#define IPA_DMA_TX_CH_BE 0 +#define IPA_DMA_RX_CH_CV2X 3 +#define IPA_DMA_TX_CH_CV2X 3 #ifdef CONFIG_ETH_IPA_OFFLOAD void ethqos_ipa_offload_event_handler(void *data, int ev); +void ethqos_wakeup_dev_emac_queue(void); #else static inline void ethqos_ipa_offload_event_handler(void *data, int ev) { @@ -58,6 +74,10 @@ static inline void ethqos_ipa_offload_event_handler(void *data, int ev) #define EV_USR_SUSPEND (EV_DPM_RESUME + 1) #define EV_USR_RESUME (EV_USR_SUSPEND + 1) #define EV_IPA_OFFLOAD_REMOVE (EV_USR_RESUME + 1) -#define EV_IPA_OFFLOAD_MAX (EV_IPA_OFFLOAD_REMOVE + 1) +#define EV_QTI_GET_CONN_STATUS (EV_IPA_OFFLOAD_REMOVE + 1) +#define EV_QTI_CHECK_CONN_UPDATE (EV_QTI_GET_CONN_STATUS + 1) +#define EV_IPA_HANDLE_RX_INTR (EV_QTI_CHECK_CONN_UPDATE + 1) +#define EV_IPA_HANDLE_TX_INTR (EV_IPA_HANDLE_RX_INTR + 1) +#define EV_IPA_OFFLOAD_MAX (EV_IPA_HANDLE_TX_INTR + 1) #endif diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ipa.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ipa.c index 48285bb179a8c096986389df1226d8c512777b2b..94214f716b240b58b05db83e67c3d3d6bce389cb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ipa.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ipa.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "stmmac.h" #include "stmmac_platform.h" @@ -42,9 +43,364 @@ #define NTN_IPA_DBG_MAX_MSG_LEN 3000 static char buf[3000]; static struct ethqos_prv_ipa_data eth_ipa_ctx; - static void __ipa_eth_free_msg(void *buff, u32 len, u32 type) {} +/* Network driver specific init for hw offload */ +static void eth_ipa_net_drv_init(void) +{ + struct platform_device *pdev = eth_ipa_ctx.ethqos->pdev; + struct net_device *dev = platform_get_drvdata(pdev); + struct stmmac_priv *priv = netdev_priv(dev); + + priv->hw_offload_enabled = true; + priv->rx_queue[IPA_DMA_RX_CH_BE].skip_sw = true; + priv->tx_queue[IPA_DMA_TX_CH_BE].skip_sw = true; + priv->hw->mac->map_mtl_to_dma(priv->hw, IPA_DMA_RX_CH_BE, 1); + + if (eth_ipa_ctx.ethqos->cv2x_mode == CV2X_MODE_MDM) { + priv->rx_queue[IPA_DMA_RX_CH_CV2X].skip_sw = true; + priv->tx_queue[IPA_DMA_TX_CH_CV2X].skip_sw = true; + priv->hw->mac->map_mtl_to_dma(priv->hw, IPA_DMA_RX_CH_CV2X, 1); + } +} + +/* IPA ctx initialization */ +static void eth_ipa_ctx_init(void) +{ + struct platform_device *pdev = eth_ipa_ctx.ethqos->pdev; + struct net_device *dev = platform_get_drvdata(pdev); + struct stmmac_priv *priv = netdev_priv(dev); + + mutex_init(ð_ipa_ctx.ipa_lock); + 
eth_ipa_ctx.ethqos->ipa_enabled = true; + + /* set queue enabled */ + eth_ipa_ctx.queue_enabled[IPA_QUEUE_BE] = true; + if (eth_ipa_ctx.ethqos->cv2x_mode == CV2X_MODE_MDM) + eth_ipa_ctx.queue_enabled[IPA_QUEUE_CV2X] = true; + + /* set queue/chan numbers */ + eth_ipa_ctx.rx_queue_num[IPA_QUEUE_BE] = IPA_DMA_RX_CH_BE; + eth_ipa_ctx.tx_queue_num[IPA_QUEUE_BE] = IPA_DMA_TX_CH_BE; + eth_ipa_ctx.rx_queue_num[IPA_QUEUE_CV2X] = IPA_DMA_RX_CH_CV2X; + eth_ipa_ctx.tx_queue_num[IPA_QUEUE_CV2X] = IPA_DMA_TX_CH_CV2X; + + /* set desc count for BE queues */ + if (eth_ipa_ctx.queue_enabled[IPA_QUEUE_BE]) { + if (of_property_read_u32( + eth_ipa_ctx.ethqos->pdev->dev.of_node, + "ipa-dma-rx-desc-cnt", + ð_ipa_ctx.ipa_dma_rx_desc_cnt[IPA_QUEUE_BE])) { + ETHQOSDBG(":resource ipa-dma-rx-desc-cnt not in dt\n"); + eth_ipa_ctx.ipa_dma_rx_desc_cnt[IPA_QUEUE_BE] = + IPA_RX_DESC_CNT_BE; + } + + if (of_property_read_u32( + eth_ipa_ctx.ethqos->pdev->dev.of_node, + "ipa-dma-tx-desc-cnt", + ð_ipa_ctx.ipa_dma_tx_desc_cnt[IPA_QUEUE_BE])) { + ETHQOSDBG(":resource ipa-dma-tx-desc-cnt not in dt\n"); + eth_ipa_ctx.ipa_dma_tx_desc_cnt[IPA_QUEUE_BE] = + IPA_RX_DESC_CNT_BE; + } + } + + /* set desc count for CV2X queues */ + if (eth_ipa_ctx.queue_enabled[IPA_QUEUE_CV2X]) { + if (of_property_read_u32( + eth_ipa_ctx.ethqos->pdev->dev.of_node, + "ipa-dma-rx-desc-cnt-cv2x", + ð_ipa_ctx.ipa_dma_rx_desc_cnt[IPA_QUEUE_CV2X])) { + ETHQOSDBG(":resource ipa-dma-rx-desc-cnt not in dt\n"); + eth_ipa_ctx.ipa_dma_rx_desc_cnt[IPA_QUEUE_CV2X] = + IPA_RX_DESC_CNT_CV2X; + } + + if (of_property_read_u32( + eth_ipa_ctx.ethqos->pdev->dev.of_node, + "ipa-dma-tx-desc-cnt-cv2x", + ð_ipa_ctx.ipa_dma_tx_desc_cnt[IPA_QUEUE_CV2X])) { + ETHQOSDBG(":resource ipa-dma-tx-desc-cnt not in dt\n"); + eth_ipa_ctx.ipa_dma_tx_desc_cnt[IPA_QUEUE_CV2X] = + IPA_TX_DESC_CNT_CV2X; + } + } + + /* set interrupt routing mode */ + if (eth_ipa_ctx.ethqos->cv2x_mode) { + eth_ipa_ctx.tx_intr_route_mode[IPA_QUEUE_BE] = + IPA_INTR_ROUTE_DB; + eth_ipa_ctx.rx_intr_route_mode[IPA_QUEUE_BE] = + IPA_INTR_ROUTE_DB; + eth_ipa_ctx.tx_intr_route_mode[IPA_QUEUE_CV2X] = + IPA_INTR_ROUTE_HW; + eth_ipa_ctx.rx_intr_route_mode[IPA_QUEUE_CV2X] = + IPA_INTR_ROUTE_HW; + } else { + eth_ipa_ctx.tx_intr_route_mode[IPA_QUEUE_BE] = + IPA_INTR_ROUTE_HW; + eth_ipa_ctx.rx_intr_route_mode[IPA_QUEUE_BE] = + IPA_INTR_ROUTE_HW; + } + + /* set buf len */ + eth_ipa_ctx.buf_len[IPA_QUEUE_BE] = ETHQOS_ETH_FRAME_LEN_IPA_BE; + eth_ipa_ctx.buf_len[IPA_QUEUE_CV2X] = ETHQOS_ETH_FRAME_LEN_IPA_CV2X; + + /* set ipa_notify_cb */ + eth_ipa_ctx.ipa_notify_cb[IPA_QUEUE_BE] = ntn_ipa_notify_cb_be; + eth_ipa_ctx.ipa_notify_cb[IPA_QUEUE_CV2X] = ntn_ipa_notify_cb_cv2x; + + /* set proto */ + eth_ipa_ctx.ipa_proto[IPA_QUEUE_BE] = IPA_UC_NTN; + eth_ipa_ctx.ipa_proto[IPA_QUEUE_CV2X] = IPA_UC_NTN_V2X; + + /* set ipa tx client */ + eth_ipa_ctx.tx_client[IPA_QUEUE_BE] = IPA_CLIENT_ETHERNET_CONS; + eth_ipa_ctx.tx_client[IPA_QUEUE_CV2X] = IPA_CLIENT_ETHERNET2_CONS; + + /* set ipa rx client */ + eth_ipa_ctx.rx_client[IPA_QUEUE_BE] = IPA_CLIENT_ETHERNET_PROD; + eth_ipa_ctx.rx_client[IPA_QUEUE_CV2X] = IPA_CLIENT_ETHERNET2_PROD; + + eth_ipa_ctx.rx_reg_base_ptr_pa[IPA_QUEUE_BE] = + (((phys_addr_t)(DMA_CR0_RGOFFADDR - BASE_ADDRESS)) + + (phys_addr_t)eth_ipa_ctx.ethqos->emac_mem_base); + + eth_ipa_ctx.rx_reg_base_ptr_pa[IPA_QUEUE_CV2X] = + (((phys_addr_t)(DMA_CR3_RGOFFADDR - BASE_ADDRESS)) + + (phys_addr_t)eth_ipa_ctx.ethqos->emac_mem_base); + + eth_ipa_ctx.tx_reg_base_ptr_pa[IPA_QUEUE_BE] = + (((phys_addr_t)(DMA_CR0_RGOFFADDR - BASE_ADDRESS)) + + 
(phys_addr_t)eth_ipa_ctx.ethqos->emac_mem_base); + + eth_ipa_ctx.tx_reg_base_ptr_pa[IPA_QUEUE_CV2X] = + (((phys_addr_t)(DMA_CR3_RGOFFADDR - BASE_ADDRESS)) + + (phys_addr_t)eth_ipa_ctx.ethqos->emac_mem_base); + + eth_ipa_ctx.need_send_msg[IPA_QUEUE_BE] = true; + eth_ipa_ctx.need_send_msg[IPA_QUEUE_CV2X] = false; + + strlcpy(eth_ipa_ctx.netdev_name[IPA_QUEUE_BE], + priv->dev->name, ETH_DEV_NAME_LEN); + strlcpy(eth_ipa_ctx.netdev_name[IPA_QUEUE_CV2X], + "eth.cv2x", ETH_DEV_NAME_LEN); + + ETHQOSDBG("eth_ipa_ctx.netdev_name[IPA_QUEUE_BE] %s\n", + eth_ipa_ctx.netdev_name[IPA_QUEUE_BE]); + ETHQOSDBG("eth_ipa_ctx.netdev_name[IPA_QUEUE_CV2X] %s\n", + eth_ipa_ctx.netdev_name[IPA_QUEUE_CV2X]); + + memcpy(eth_ipa_ctx.netdev_addr[IPA_QUEUE_BE], + priv->dev->dev_addr, ETH_ALEN); + memcpy(eth_ipa_ctx.netdev_addr[IPA_QUEUE_CV2X], + eth_ipa_ctx.ethqos->cv2x_dev_addr, ETH_ALEN); + + eth_ipa_ctx.netdev_index[IPA_QUEUE_BE] = priv->dev->ifindex; + eth_ipa_ctx.netdev_index[IPA_QUEUE_CV2X] = 0; + + eth_ipa_ctx.rx_intr_mod_cnt[IPA_QUEUE_BE] = 0; + eth_ipa_ctx.rx_intr_mod_cnt[IPA_QUEUE_CV2X] = 1; +} + +static inline bool eth_ipa_queue_type_supported(enum ipa_queue_type type) +{ + return (type >= 0 && type < IPA_QUEUE_MAX); +} + +static inline bool eth_ipa_queue_type_enabled(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.queue_enabled[type]; +} + +static inline char *eth_ipa_queue_type_to_device_name(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.netdev_name[type]; +} + +static inline u8 *eth_ipa_queue_type_to_device_addr(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.netdev_addr[type]; +} + +static inline u8 eth_ipa_queue_type_to_if_index(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.netdev_index[type]; +} + +static inline enum ipa_intr_route_type + eth_ipa_queue_type_to_tx_intr_route(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.tx_intr_route_mode[type]; +} + +static inline enum ipa_intr_route_type + eth_ipa_queue_type_to_rx_intr_route(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.rx_intr_route_mode[type]; +} + +static inline u8 eth_ipa_queue_type_to_tx_queue(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.tx_queue_num[type]; +} + +static inline u8 eth_ipa_queue_type_to_rx_queue(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.rx_queue_num[type]; +} + +static inline u32 eth_ipa_queue_type_to_tx_desc_count(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.ipa_dma_tx_desc_cnt[type]; +} + +static inline u32 eth_ipa_queue_type_to_rx_desc_count(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.ipa_dma_rx_desc_cnt[type]; +} + +static inline u32 +eth_ipa_queue_type_to_rx_intr_mod_cnt(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.rx_intr_mod_cnt[type]; +} + +/* One common function for TX and RX buf lengths, + * to be changed if TX and RX have different buf lengths + */ +static inline u32 eth_ipa_queue_type_to_buf_length(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.buf_len[type]; +} + +static inline ipa_notify_cb + 
eth_ipa_queue_type_to_ipa_notify_cb(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.ipa_notify_cb[type]; +} + +static inline u32 eth_ipa_queue_type_to_proto(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.ipa_proto[type]; +} + +static inline u32 eth_ipa_queue_type_to_tx_client(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.tx_client[type]; +} + +static inline u32 eth_ipa_queue_type_to_rx_client(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.rx_client[type]; +} + +static inline bool eth_ipa_queue_type_to_ipa_vlan_mode(enum ipa_queue_type type) +{ + bool ipa_vlan_mode = false; + + WARN_ON(!eth_ipa_queue_type_supported(type)); + + switch (type) { + case IPA_QUEUE_BE: + if (ipa_is_vlan_mode(IPA_VLAN_IF_EMAC, &ipa_vlan_mode)) { + ETHQOSERR("Could not read ipa_vlan_mode\n"); + /* In case of failure, fallback to non vlan mode */ + ipa_vlan_mode = false; + } + return ipa_vlan_mode; + case IPA_QUEUE_CV2X: + return true; + default: + return false; + } + + return false; +} + +static inline phys_addr_t eth_ipa_queue_type_to_rx_reg_base_ptr_pa( + enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.rx_reg_base_ptr_pa[type]; +} + +static inline phys_addr_t eth_ipa_queue_type_to_tx_reg_base_ptr_pa( + enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.tx_reg_base_ptr_pa[type]; +} + +static void eth_ipa_handle_rx_interrupt(unsigned int qinx) +{ + int type; + + if (!eth_ipa_ctx.ipa_offload_conn) { + ETHQOSERR("IPA Offload not connected\n"); + return; + } + + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type) && + (eth_ipa_queue_type_to_rx_intr_route(type) == + IPA_INTR_ROUTE_DB) && + (eth_ipa_queue_type_to_rx_queue(type) == qinx)) { + ETHQOSDBG("writing for qinx %d db=%x\n", + qinx, eth_ipa_ctx.uc_db_rx_addr[type]); + writel_relaxed(1, eth_ipa_ctx.uc_db_rx_addr[type]); + break; + } + } +} + +static void eth_ipa_handle_tx_interrupt(unsigned int qinx) +{ + int type; + + if (!eth_ipa_ctx.ipa_offload_conn) { + ETHQOSERR("IPA Offload not connected\n"); + return; + } + + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type) && + (eth_ipa_queue_type_to_tx_intr_route(type) == + IPA_INTR_ROUTE_DB) && + (eth_ipa_queue_type_to_tx_queue(type) == qinx)) { + ETHQOSDBG("writing for qinx %d db=%x\n", + qinx, eth_ipa_ctx.uc_db_tx_addr[type]); + writel_relaxed(1, eth_ipa_ctx.uc_db_tx_addr[type]); + break; + } + } +} + +static inline bool +eth_ipa_queue_type_to_send_msg_needed(enum ipa_queue_type type) +{ + WARN_ON(!eth_ipa_queue_type_supported(type)); + return eth_ipa_ctx.need_send_msg[type]; +} + static inline void *ethqos_get_priv(struct qcom_ethqos *ethqos) { struct platform_device *pdev = ethqos->pdev; @@ -55,7 +411,8 @@ static inline void *ethqos_get_priv(struct qcom_ethqos *ethqos) } static int eth_ipa_send_msg(struct qcom_ethqos *ethqos, - enum ipa_peripheral_event event) + enum ipa_peripheral_event event, + enum ipa_queue_type type) { struct ipa_msg_meta msg_meta; struct ipa_ecm_msg emac_msg; @@ -69,8 +426,9 @@ static int eth_ipa_send_msg(struct qcom_ethqos *ethqos, memset(&msg_meta, 0, sizeof(msg_meta)); memset(&emac_msg, 0, sizeof(emac_msg)); - emac_msg.ifindex = priv->dev->ifindex; - strlcpy(emac_msg.name, priv->dev->name, IPA_RESOURCE_NAME_MAX); + 
emac_msg.ifindex = eth_ipa_queue_type_to_if_index(type); + strlcpy(emac_msg.name, eth_ipa_queue_type_to_device_name(type), + IPA_RESOURCE_NAME_MAX); msg_meta.msg_type = event; msg_meta.msg_len = sizeof(struct ipa_ecm_msg); @@ -78,227 +436,242 @@ static int eth_ipa_send_msg(struct qcom_ethqos *ethqos, return ipa_send_msg(&msg_meta, &emac_msg, __ipa_eth_free_msg); } -static int ethqos_alloc_ipa_tx_queue_struct(struct qcom_ethqos *ethqos) +static int ethqos_alloc_ipa_tx_queue_struct(struct qcom_ethqos *ethqos, + enum ipa_queue_type type) { int ret = 0, chinx, cnt; struct platform_device *pdev = ethqos->pdev; struct net_device *dev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(dev); - chinx = 0; - eth_ipa_ctx.tx_queue = + chinx = eth_ipa_queue_type_to_tx_queue(type); + + eth_ipa_ctx.tx_queue[type] = kzalloc(sizeof(struct ethqos_tx_queue), GFP_KERNEL); - if (!eth_ipa_ctx.tx_queue) { - ETHQOSERR("ERROR: Unable to allocate Tx queue structure\n"); + if (!eth_ipa_ctx.tx_queue[type]) { + ETHQOSERR("ERR: Unable to allocate Tx queue struct for %d\n", + type); ret = -ENOMEM; goto err_out_tx_q_alloc_failed; } - eth_ipa_ctx.tx_queue->desc_cnt = eth_ipa_ctx.ipa_dma_tx_desc_cnt; + eth_ipa_ctx.tx_queue[type]->desc_cnt = + eth_ipa_queue_type_to_tx_desc_count(type); /* Allocate tx_desc_ptrs */ - eth_ipa_ctx.tx_queue->tx_desc_ptrs = - kcalloc(eth_ipa_ctx.tx_queue->desc_cnt, + eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs = + kcalloc(eth_ipa_ctx.tx_queue[type]->desc_cnt, sizeof(struct dma_desc *), GFP_KERNEL); - if (!eth_ipa_ctx.tx_queue->tx_desc_ptrs) { - ETHQOSERR("ERROR: Unable to allocate Tx Desc ptrs\n"); + if (!eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs) { + ETHQOSERR("ERR: Unable to allocate Tx Desc ptrs for %d\n", + type); ret = -ENOMEM; goto err_out_tx_desc_ptrs_failed; } - for (cnt = 0; cnt < eth_ipa_ctx.tx_queue->desc_cnt; cnt++) { - eth_ipa_ctx.tx_queue->tx_desc_ptrs[cnt] - = eth_ipa_ctx.tx_queue->tx_desc_ptrs[0] + + for (cnt = 0; cnt < eth_ipa_ctx.tx_queue[type]->desc_cnt; cnt++) { + eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs[cnt] + = eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs[0] + (sizeof(struct dma_desc *) * cnt); } /* Allocate tx_desc_dma_addrs */ - eth_ipa_ctx.tx_queue->tx_desc_dma_addrs = + eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs = kzalloc(sizeof(dma_addr_t) * - eth_ipa_ctx.tx_queue->desc_cnt, GFP_KERNEL); - if (!eth_ipa_ctx.tx_queue->tx_desc_dma_addrs) { - ETHQOSERR("ERROR: Unable to allocate Tx Desc dma addrs\n"); + eth_ipa_ctx.tx_queue[type]->desc_cnt, + GFP_KERNEL); + if (!eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs) { + ETHQOSERR("ERR: Unable to allocate Tx Desc dma addrs for %d\n", + type); ret = -ENOMEM; goto err_out_tx_desc_dma_addrs_failed; } - for (cnt = 0; cnt < eth_ipa_ctx.tx_queue->desc_cnt; cnt++) { - eth_ipa_ctx.tx_queue->tx_desc_dma_addrs[cnt] - = eth_ipa_ctx.tx_queue->tx_desc_dma_addrs[0] + + for (cnt = 0; cnt < eth_ipa_ctx.tx_queue[type]->desc_cnt; cnt++) { + eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs[cnt] + = eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs[0] + (sizeof(dma_addr_t) * cnt); } /* Allocate tx_buf_ptrs */ - eth_ipa_ctx.tx_queue->skb = + eth_ipa_ctx.tx_queue[type]->skb = kzalloc(sizeof(struct sk_buff *) * - eth_ipa_ctx.tx_queue->desc_cnt, GFP_KERNEL); - if (!eth_ipa_ctx.tx_queue->skb) { - ETHQOSERR("ERROR: Unable to allocate Tx buff ptrs\n"); + eth_ipa_ctx.tx_queue[type]->desc_cnt, GFP_KERNEL); + if (!eth_ipa_ctx.tx_queue[type]->skb) { + ETHQOSERR("ERR: Unable to allocate Tx buff ptrs for %d\n", + type); ret = -ENOMEM; goto 
err_out_tx_buf_ptrs_failed; } /* Allocate ipa_tx_buff_pool_va_addrs_base */ - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_va_addrs_base = - kzalloc(sizeof(void *) * eth_ipa_ctx.tx_queue->desc_cnt, GFP_KERNEL); - if (!eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_va_addrs_base) { - ETHQOSERR("ERROR: Unable to allocate Tx ipa buff addrs\n"); + eth_ipa_ctx.tx_queue[type]->ipa_tx_buff_pool_va_addrs_base = + kzalloc(sizeof(void *) * eth_ipa_ctx.tx_queue[type]->desc_cnt, + GFP_KERNEL); + if (!eth_ipa_ctx.tx_queue[type]->ipa_tx_buff_pool_va_addrs_base) { + ETHQOSERR("ERR: Unable to allocate Tx ipa buff addrs for %d\n", + type); ret = -ENOMEM; goto err_out_tx_buf_ptrs_failed; } - eth_ipa_ctx.tx_queue->skb_dma = - kzalloc(sizeof(dma_addr_t) * eth_ipa_ctx.tx_queue->desc_cnt, + eth_ipa_ctx.tx_queue[type]->skb_dma = + kzalloc(sizeof(dma_addr_t) * eth_ipa_ctx.tx_queue[type]->desc_cnt, GFP_KERNEL); - if (!eth_ipa_ctx.tx_queue->skb_dma) { - ETHQOSERR("ERROR: Unable to allocate Tx ipa buff addrs\n"); + if (!eth_ipa_ctx.tx_queue[type]->skb_dma) { + ETHQOSERR("ERR: Unable to allocate Tx ipa buff addrs for %d\n", + type); ret = -ENOMEM; goto err_out_tx_buf_ptrs_failed; } - eth_ipa_ctx.tx_queue->ipa_tx_buff_phy_addr = - kzalloc(sizeof(phys_addr_t) * eth_ipa_ctx.tx_queue->desc_cnt, + eth_ipa_ctx.tx_queue[type]->ipa_tx_phy_addr = + kzalloc(sizeof(phys_addr_t) * eth_ipa_ctx.tx_queue[type]->desc_cnt, GFP_KERNEL); - if (!eth_ipa_ctx.tx_queue->ipa_tx_buff_phy_addr) { - ETHQOSERR("ERROR: Unable to allocate Tx ipa buff dma addrs\n"); + if (!eth_ipa_ctx.tx_queue[type]->ipa_tx_phy_addr) { + ETHQOSERR("ERROR: Unable to allocate Tx ipa buff dma addrs\n"); ret = -ENOMEM; goto err_out_tx_buf_ptrs_failed; } ETHQOSDBG("<--ethqos_alloc_tx_queue_struct\n"); - eth_ipa_ctx.tx_queue->tx_q = &priv->tx_queue[chinx]; + eth_ipa_ctx.tx_queue[type]->tx_q = &priv->tx_queue[chinx]; return ret; err_out_tx_buf_ptrs_failed: - kfree(eth_ipa_ctx.tx_queue->tx_desc_dma_addrs); - eth_ipa_ctx.tx_queue->tx_desc_dma_addrs = NULL; - kfree(eth_ipa_ctx.tx_queue->skb); - eth_ipa_ctx.tx_queue->skb = NULL; - kfree(eth_ipa_ctx.tx_queue->skb_dma); - eth_ipa_ctx.tx_queue->skb_dma = NULL; - kfree(eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_va_addrs_base); - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_va_addrs_base = NULL; - kfree(eth_ipa_ctx.tx_queue->ipa_tx_buff_phy_addr); - eth_ipa_ctx.tx_queue->ipa_tx_buff_phy_addr = NULL; + kfree(eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs); + eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs = NULL; + kfree(eth_ipa_ctx.tx_queue[type]->skb); + eth_ipa_ctx.tx_queue[type]->skb = NULL; + kfree(eth_ipa_ctx.tx_queue[type]->skb_dma); + eth_ipa_ctx.tx_queue[type]->skb_dma = NULL; + kfree(eth_ipa_ctx.tx_queue[type]->ipa_tx_buff_pool_va_addrs_base); + eth_ipa_ctx.tx_queue[type]->ipa_tx_buff_pool_va_addrs_base = NULL; + kfree(eth_ipa_ctx.tx_queue[type]->ipa_tx_phy_addr); + eth_ipa_ctx.tx_queue[type]->ipa_tx_phy_addr = NULL; err_out_tx_desc_dma_addrs_failed: - kfree(eth_ipa_ctx.tx_queue->tx_desc_ptrs); - eth_ipa_ctx.tx_queue->tx_desc_ptrs = NULL; + kfree(eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs); + eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs = NULL; err_out_tx_desc_ptrs_failed: - kfree(eth_ipa_ctx.tx_queue); - eth_ipa_ctx.tx_queue = NULL; + kfree(eth_ipa_ctx.tx_queue[type]); + eth_ipa_ctx.tx_queue[type] = NULL; err_out_tx_q_alloc_failed: return ret; } -static void ethqos_free_ipa_tx_queue_struct(struct qcom_ethqos *ethqos) +static void ethqos_free_ipa_tx_queue_struct(struct qcom_ethqos *ethqos, + enum ipa_queue_type type) { - 
kfree(eth_ipa_ctx.tx_queue->skb_dma); - eth_ipa_ctx.tx_queue->skb_dma = NULL; + kfree(eth_ipa_ctx.tx_queue[type]->skb_dma); + eth_ipa_ctx.tx_queue[type]->skb_dma = NULL; - kfree(eth_ipa_ctx.tx_queue->tx_desc_dma_addrs); - eth_ipa_ctx.tx_queue->tx_desc_dma_addrs = NULL; + kfree(eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs); + eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs = NULL; - kfree(eth_ipa_ctx.tx_queue->tx_desc_ptrs); - eth_ipa_ctx.tx_queue->tx_desc_ptrs = NULL; + kfree(eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs); + eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs = NULL; - kfree(eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_va_addrs_base); - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_va_addrs_base = NULL; + kfree(eth_ipa_ctx.tx_queue[type]->ipa_tx_buff_pool_va_addrs_base); + eth_ipa_ctx.tx_queue[type]->ipa_tx_buff_pool_va_addrs_base = NULL; - kfree(eth_ipa_ctx.tx_queue->skb); - eth_ipa_ctx.tx_queue->skb = NULL; + kfree(eth_ipa_ctx.tx_queue[type]->skb); + eth_ipa_ctx.tx_queue[type]->skb = NULL; } -static int ethqos_alloc_ipa_rx_queue_struct(struct qcom_ethqos *ethqos) +static int ethqos_alloc_ipa_rx_queue_struct(struct qcom_ethqos *ethqos, + enum ipa_queue_type type) { int ret = 0, chinx, cnt; - chinx = 0; - eth_ipa_ctx.rx_queue = + chinx = eth_ipa_queue_type_to_rx_queue(type); + + eth_ipa_ctx.rx_queue[type] = kzalloc(sizeof(struct ethqos_rx_queue), GFP_KERNEL); - if (!eth_ipa_ctx.rx_queue) { + if (!eth_ipa_ctx.rx_queue[type]) { ETHQOSERR("ERROR: Unable to allocate Rx queue structure\n"); ret = -ENOMEM; goto err_out_rx_q_alloc_failed; } - eth_ipa_ctx.rx_queue->desc_cnt = eth_ipa_ctx.ipa_dma_rx_desc_cnt; + eth_ipa_ctx.rx_queue[type]->desc_cnt = + eth_ipa_queue_type_to_rx_desc_count(type); /* Allocate rx_desc_ptrs */ - eth_ipa_ctx.rx_queue->rx_desc_ptrs = - kcalloc(eth_ipa_ctx.rx_queue->desc_cnt, + eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs = + kcalloc(eth_ipa_ctx.rx_queue[type]->desc_cnt, sizeof(struct dma_desc *), GFP_KERNEL); - if (!eth_ipa_ctx.rx_queue->rx_desc_ptrs) { + if (!eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs) { ETHQOSERR("ERROR: Unable to allocate Rx Desc ptrs\n"); ret = -ENOMEM; goto err_out_rx_desc_ptrs_failed; } - for (cnt = 0; cnt < eth_ipa_ctx.rx_queue->desc_cnt; cnt++) { - eth_ipa_ctx.rx_queue->rx_desc_ptrs[cnt] - = eth_ipa_ctx.rx_queue->rx_desc_ptrs[0] + + for (cnt = 0; cnt < eth_ipa_ctx.rx_queue[type]->desc_cnt; cnt++) { + eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs[cnt] + = eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs[0] + (sizeof(struct dma_desc *) * cnt); } /* Allocate rx_desc_dma_addrs */ - eth_ipa_ctx.rx_queue->rx_desc_dma_addrs = - kzalloc(sizeof(dma_addr_t) * eth_ipa_ctx.rx_queue->desc_cnt, - GFP_KERNEL); - if (!eth_ipa_ctx.rx_queue->rx_desc_dma_addrs) { + eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs = + kzalloc(sizeof(dma_addr_t) * eth_ipa_ctx.rx_queue[type]->desc_cnt, + GFP_KERNEL); + if (!eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs) { ETHQOSERR("ERROR: Unable to allocate Rx Desc dma addr\n"); ret = -ENOMEM; goto err_out_rx_desc_dma_addrs_failed; } - for (cnt = 0; cnt < eth_ipa_ctx.rx_queue->desc_cnt; cnt++) { - eth_ipa_ctx.rx_queue->rx_desc_dma_addrs[cnt] - = eth_ipa_ctx.rx_queue->rx_desc_dma_addrs[0] + for (cnt = 0; cnt < eth_ipa_ctx.rx_queue[type]->desc_cnt; cnt++) { + eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs[cnt] + = eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs[0] + (sizeof(dma_addr_t) * cnt); } /* Allocat rx_ipa_buff */ - eth_ipa_ctx.rx_queue->skb = + eth_ipa_ctx.rx_queue[type]->skb = kzalloc(sizeof(struct sk_buff *) * - eth_ipa_ctx.rx_queue->desc_cnt, GFP_KERNEL); - if 
(!eth_ipa_ctx.rx_queue->skb) { + eth_ipa_ctx.rx_queue[type]->desc_cnt, GFP_KERNEL); + if (!eth_ipa_ctx.rx_queue[type]->skb) { ETHQOSERR("ERROR: Unable to allocate Tx buff ptrs\n"); ret = -ENOMEM; goto err_out_rx_buf_ptrs_failed; } - eth_ipa_ctx.rx_queue->ipa_buff_va = + eth_ipa_ctx.rx_queue[type]->ipa_buff_va = kzalloc(sizeof(void *) * - eth_ipa_ctx.rx_queue->desc_cnt, GFP_KERNEL); - if (!eth_ipa_ctx.rx_queue->ipa_buff_va) { + eth_ipa_ctx.rx_queue[type]->desc_cnt, GFP_KERNEL); + if (!eth_ipa_ctx.rx_queue[type]->ipa_buff_va) { ETHQOSERR("ERROR: Unable to allocate Tx buff ptrs\n"); ret = -ENOMEM; goto err_out_rx_buf_ptrs_failed; } /* Allocate ipa_rx_buff_pool_va_addrs_base */ - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_va_addrs_base = - kzalloc(sizeof(void *) * eth_ipa_ctx.rx_queue->desc_cnt, + eth_ipa_ctx.rx_queue[type]->ipa_rx_buff_pool_va_addrs_base = + kzalloc(sizeof(void *) * eth_ipa_ctx.rx_queue[type]->desc_cnt, GFP_KERNEL); - if (!eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_va_addrs_base) { + if (!eth_ipa_ctx.rx_queue[type]->ipa_rx_buff_pool_va_addrs_base) { ETHQOSERR("ERROR: Unable to allocate Rx ipa buff addrs\n"); ret = -ENOMEM; goto err_out_rx_buf_ptrs_failed; } - eth_ipa_ctx.rx_queue->skb_dma = - kzalloc(sizeof(dma_addr_t) * eth_ipa_ctx.rx_queue->desc_cnt, + eth_ipa_ctx.rx_queue[type]->skb_dma = + kzalloc(sizeof(dma_addr_t) * eth_ipa_ctx.rx_queue[type]->desc_cnt, GFP_KERNEL); - if (!eth_ipa_ctx.rx_queue->skb_dma) { + if (!eth_ipa_ctx.rx_queue[type]->skb_dma) { ETHQOSERR("ERROR: Unable to allocate rx ipa buff addrs\n"); ret = -ENOMEM; goto err_out_rx_buf_ptrs_failed; } - eth_ipa_ctx.rx_queue->ipa_rx_buff_phy_addr = - kzalloc(sizeof(phys_addr_t) * eth_ipa_ctx.rx_queue->desc_cnt, + eth_ipa_ctx.rx_queue[type]->ipa_rx_buff_phy_addr = + kzalloc(sizeof(phys_addr_t) * eth_ipa_ctx.rx_queue[type]->desc_cnt, GFP_KERNEL); - if (!eth_ipa_ctx.rx_queue->ipa_rx_buff_phy_addr) { + if (!eth_ipa_ctx.rx_queue[type]->ipa_rx_buff_phy_addr) { ETHQOSERR("ERROR: Unable to allocate rx ipa buff dma addrs\n"); ret = -ENOMEM; goto err_out_rx_buf_ptrs_failed; @@ -308,152 +681,157 @@ static int ethqos_alloc_ipa_rx_queue_struct(struct qcom_ethqos *ethqos) return ret; err_out_rx_buf_ptrs_failed: - kfree(eth_ipa_ctx.rx_queue->rx_desc_dma_addrs); - eth_ipa_ctx.rx_queue->rx_desc_dma_addrs = NULL; - kfree(eth_ipa_ctx.rx_queue->skb); - eth_ipa_ctx.rx_queue->skb = NULL; - kfree(eth_ipa_ctx.rx_queue->skb_dma); - eth_ipa_ctx.rx_queue->skb_dma = NULL; - kfree(eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_va_addrs_base); - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_va_addrs_base = NULL; - kfree(eth_ipa_ctx.rx_queue->ipa_rx_buff_phy_addr); - eth_ipa_ctx.rx_queue->ipa_rx_buff_phy_addr = NULL; - kfree(eth_ipa_ctx.rx_queue->ipa_buff_va); - eth_ipa_ctx.rx_queue->ipa_buff_va = NULL; + kfree(eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs); + eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs = NULL; + kfree(eth_ipa_ctx.rx_queue[type]->skb); + eth_ipa_ctx.rx_queue[type]->skb = NULL; + kfree(eth_ipa_ctx.rx_queue[type]->skb_dma); + eth_ipa_ctx.rx_queue[type]->skb_dma = NULL; + kfree(eth_ipa_ctx.rx_queue[type]->ipa_rx_buff_pool_va_addrs_base); + eth_ipa_ctx.rx_queue[type]->ipa_rx_buff_pool_va_addrs_base = NULL; + kfree(eth_ipa_ctx.rx_queue[type]->ipa_rx_buff_phy_addr); + eth_ipa_ctx.rx_queue[type]->ipa_rx_buff_phy_addr = NULL; + kfree(eth_ipa_ctx.rx_queue[type]->ipa_buff_va); + eth_ipa_ctx.rx_queue[type]->ipa_buff_va = NULL; err_out_rx_desc_dma_addrs_failed: - kfree(eth_ipa_ctx.rx_queue->rx_desc_ptrs); - eth_ipa_ctx.rx_queue->rx_desc_ptrs = NULL; + 
kfree(eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs); + eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs = NULL; err_out_rx_desc_ptrs_failed: - kfree(eth_ipa_ctx.rx_queue); - eth_ipa_ctx.rx_queue = NULL; + kfree(eth_ipa_ctx.rx_queue[type]); + eth_ipa_ctx.rx_queue[type] = NULL; err_out_rx_q_alloc_failed: return ret; } -static void ethqos_free_ipa_rx_queue_struct(struct qcom_ethqos *ethqos) +static void ethqos_free_ipa_rx_queue_struct(struct qcom_ethqos *ethqos, + enum ipa_queue_type type) { - kfree(eth_ipa_ctx.rx_queue->skb); - eth_ipa_ctx.rx_queue->skb = NULL; + kfree(eth_ipa_ctx.rx_queue[type]->skb); + eth_ipa_ctx.rx_queue[type]->skb = NULL; - kfree(eth_ipa_ctx.rx_queue->rx_desc_dma_addrs); - eth_ipa_ctx.rx_queue->rx_desc_dma_addrs = NULL; + kfree(eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs); + eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs = NULL; - kfree(eth_ipa_ctx.rx_queue->rx_desc_ptrs); - eth_ipa_ctx.rx_queue->rx_desc_ptrs = NULL; + kfree(eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs); + eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs = NULL; - kfree(eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_va_addrs_base); - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_va_addrs_base = NULL; + kfree(eth_ipa_ctx.rx_queue[type]->ipa_rx_buff_pool_va_addrs_base); + eth_ipa_ctx.rx_queue[type]->ipa_rx_buff_pool_va_addrs_base = NULL; - kfree(eth_ipa_ctx.rx_queue->skb); - eth_ipa_ctx.rx_queue->skb = NULL; + kfree(eth_ipa_ctx.rx_queue[type]->skb); + eth_ipa_ctx.rx_queue[type]->skb = NULL; } static void ethqos_rx_buf_free_mem(struct qcom_ethqos *ethqos, - unsigned int rx_qcnt) + enum ipa_queue_type type) { struct net_device *ndev = dev_get_drvdata(ðqos->pdev->dev); struct stmmac_priv *priv = netdev_priv(ndev); /* Deallocate RX Buffer Pool Structure */ /* Free memory pool for RX offload path */ - /* Currently only IPA_DMA_RX_CH is supported */ - if (eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base) { + if (eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base) { dma_free_coherent (GET_MEM_PDEV_DEV, - sizeof(dma_addr_t) * eth_ipa_ctx.rx_queue->desc_cnt, - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base, - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base_dmahndl); - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base = NULL; - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base_dmahndl + sizeof(dma_addr_t) * eth_ipa_ctx.rx_queue[type]->desc_cnt, + eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base, + eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base_dmahndl); + + eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base = + NULL; + eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base_dmahndl = (dma_addr_t)NULL; ETHQOSDBG("Freed Rx Buffer Pool Structure for IPA\n"); } else { ETHQOSERR("Unable to DeAlloc RX Buff structure\n"); } - - ETHQOSDBG("\n"); } static void ethqos_rx_desc_free_mem(struct qcom_ethqos *ethqos, - unsigned int rx_qcnt) + enum ipa_queue_type type) { struct net_device *ndev = dev_get_drvdata(ðqos->pdev->dev); struct stmmac_priv *priv = netdev_priv(ndev); - ETHQOSDBG("-->DWC_ETH_QOS_rx_desc_free_mem: rx_qcnt = %d\n", rx_qcnt); + ETHQOSDBG("rx_queue = %d\n", eth_ipa_queue_type_to_rx_queue(type)); - if (eth_ipa_ctx.rx_queue->rx_desc_ptrs[0]) { + if (eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs[0]) { dma_free_coherent (GET_MEM_PDEV_DEV, (sizeof(struct dma_desc) * - eth_ipa_ctx.rx_queue->desc_cnt), - eth_ipa_ctx.rx_queue->rx_desc_ptrs[0], - eth_ipa_ctx.rx_queue->rx_desc_dma_addrs[0]); - eth_ipa_ctx.rx_queue->rx_desc_ptrs[0] = NULL; + eth_ipa_ctx.rx_queue[type]->desc_cnt), + eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs[0], + 
eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs[0]); + eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs[0] = NULL; } ETHQOSDBG("\n"); } static void ethqos_tx_buf_free_mem(struct qcom_ethqos *ethqos, - unsigned int tx_qcnt) + enum ipa_queue_type type) { unsigned int i = 0; struct net_device *ndev = dev_get_drvdata(ðqos->pdev->dev); struct stmmac_priv *priv = netdev_priv(ndev); - ETHQOSDBG(": tx_qcnt = %d\n", tx_qcnt); + ETHQOSDBG("tx_queue = %d\n", eth_ipa_queue_type_to_tx_queue(type)); - for (i = 0; i < eth_ipa_ctx.tx_queue->desc_cnt; i++) { + for (i = 0; i < eth_ipa_ctx.tx_queue[type]->desc_cnt; i++) { dma_free_coherent (GET_MEM_PDEV_DEV, - ETHQOS_ETH_FRAME_LEN_IPA, - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_va_addrs_base[i], - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base[i]); + eth_ipa_queue_type_to_buf_length(type), + eth_ipa_ctx.tx_queue[type]->ipa_tx_buff_pool_va_addrs_base[i], + eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base[i]); } - ETHQOSDBG("Freed the memory allocated for IPA_DMA_TX_CH\n"); + ETHQOSDBG("Freed the memory allocated for %d\n", + eth_ipa_queue_type_to_tx_queue(type)); /* De-Allocate TX DMA Buffer Pool Structure */ - if (eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base) { + if (eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base) { dma_free_coherent (GET_MEM_PDEV_DEV, - sizeof(dma_addr_t) * eth_ipa_ctx.tx_queue->desc_cnt, - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base, - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base_dmahndl); - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base = NULL; - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base_dmahndl + sizeof(dma_addr_t) * + eth_ipa_ctx.tx_queue[type]->desc_cnt, + eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base, + eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base_dmahndl); + + eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base = NULL; + eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base_dmahndl = (dma_addr_t)NULL; - ETHQOSDBG("Freed TX Buffer Pool Structure for IPA\n"); + ETHQOSERR("Freed TX Buffer Pool Structure for IPA\n"); } else { - ETHQOSDBG("Unable to DeAlloc TX Buff structure\n"); + ETHQOSERR("Unable to DeAlloc TX Buff structure\n"); } ETHQOSDBG("\n"); } static void ethqos_tx_desc_free_mem(struct qcom_ethqos *ethqos, - unsigned int tx_qcnt) + enum ipa_queue_type type) { struct net_device *ndev = dev_get_drvdata(ðqos->pdev->dev); struct stmmac_priv *priv = netdev_priv(ndev); - ETHQOSDBG("tx_qcnt = %d\n", tx_qcnt); + ETHQOSDBG("tx_queue = %d\n", eth_ipa_queue_type_to_tx_queue(type)); - if (eth_ipa_ctx.tx_queue->tx_desc_ptrs[0]) { + if (eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs[0]) { dma_free_coherent (GET_MEM_PDEV_DEV, (sizeof(struct dma_desc) * - eth_ipa_ctx.tx_queue->desc_cnt), - eth_ipa_ctx.tx_queue->tx_desc_ptrs[0], - eth_ipa_ctx.tx_queue->tx_desc_dma_addrs[0]); - eth_ipa_ctx.tx_queue->tx_desc_ptrs[0] = NULL; + eth_ipa_ctx.tx_queue[type]->desc_cnt), + eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs[0], + eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs[0]); + + eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs[0] = NULL; } ETHQOSDBG("\n"); } -static int allocate_ipa_buffer_and_desc(struct qcom_ethqos *ethqos) +static int allocate_ipa_buffer_and_desc( + struct qcom_ethqos *ethqos, enum ipa_queue_type type) { int ret = 0; unsigned int qinx = 0; @@ -461,56 +839,57 @@ static int allocate_ipa_buffer_and_desc(struct qcom_ethqos *ethqos) struct stmmac_priv *priv = netdev_priv(ndev); /* TX descriptors */ - eth_ipa_ctx.tx_queue->tx_desc_ptrs[0] = dma_alloc_coherent( + eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs[0] = 
dma_alloc_coherent( GET_MEM_PDEV_DEV, - (sizeof(struct dma_desc) * eth_ipa_ctx.tx_queue->desc_cnt), - (eth_ipa_ctx.tx_queue->tx_desc_dma_addrs), + (sizeof(struct dma_desc) * eth_ipa_ctx.tx_queue[type]->desc_cnt), + (eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs), GFP_KERNEL); - if (!eth_ipa_ctx.tx_queue->tx_desc_ptrs[0]) { + if (!eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs[0]) { ret = -ENOMEM; goto err_out_tx_desc; } - ETHQOSDBG("Tx Queue(%d) desc base dma address: %pK\n", - qinx, eth_ipa_ctx.tx_queue->tx_desc_dma_addrs); + ETHQOSERR("Tx Queue(%d) desc base dma address: %pK\n", + qinx, eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs); /* Allocate descriptors and buffers memory for all RX queues */ /* RX descriptors */ - eth_ipa_ctx.rx_queue->rx_desc_ptrs[0] = dma_alloc_coherent( + eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs[0] = dma_alloc_coherent( GET_MEM_PDEV_DEV, - (sizeof(struct dma_desc) * eth_ipa_ctx.rx_queue->desc_cnt), - (eth_ipa_ctx.rx_queue->rx_desc_dma_addrs), GFP_KERNEL); - if (!eth_ipa_ctx.rx_queue->rx_desc_ptrs[0]) { + (sizeof(struct dma_desc) * eth_ipa_ctx.rx_queue[type]->desc_cnt), + (eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs), GFP_KERNEL); + if (!eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs[0]) { ret = -ENOMEM; goto rx_alloc_failure; } ETHQOSDBG("Rx Queue(%d) desc base dma address: %pK\n", - qinx, eth_ipa_ctx.rx_queue->rx_desc_dma_addrs); + qinx, eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs); ETHQOSDBG("\n"); return ret; rx_alloc_failure: - ethqos_rx_desc_free_mem(ethqos, qinx); + ethqos_rx_desc_free_mem(ethqos, type); err_out_tx_desc: - ethqos_tx_desc_free_mem(ethqos, qinx); + ethqos_tx_desc_free_mem(ethqos, type); return ret; } static void ethqos_ipa_tx_desc_init(struct qcom_ethqos *ethqos, - unsigned int QINX) + enum ipa_queue_type type) { int i = 0; struct dma_desc *TX_NORMAL_DESC; + unsigned int qinx = eth_ipa_queue_type_to_tx_queue(type); ETHQOSDBG("-->tx_descriptor_init\n"); /* initialze all descriptors. 
*/ - for (i = 0; i < eth_ipa_ctx.tx_queue->desc_cnt; i++) { - TX_NORMAL_DESC = eth_ipa_ctx.tx_queue->tx_desc_ptrs[i]; + for (i = 0; i < eth_ipa_ctx.tx_queue[type]->desc_cnt; i++) { + TX_NORMAL_DESC = eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs[i]; TX_NORMAL_DESC->des0 = 0; /* update buffer 2 address pointer to zero */ TX_NORMAL_DESC->des1 = 0; @@ -522,15 +901,15 @@ static void ethqos_ipa_tx_desc_init(struct qcom_ethqos *ethqos, TX_NORMAL_DESC->des3 = 0; } /* update the total no of Tx descriptors count */ - DMA_TDRLR_RGWR(QINX, (eth_ipa_ctx.tx_queue->desc_cnt - 1)); + DMA_TDRLR_RGWR(qinx, (eth_ipa_ctx.tx_queue[type]->desc_cnt - 1)); /* update the starting address of desc chain/ring */ - DMA_TDLAR_RGWR(QINX, eth_ipa_ctx.tx_queue->tx_desc_dma_addrs[0]); + DMA_TDLAR_RGWR(qinx, eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs[0]); ETHQOSDBG("\n"); } static void ethqos_ipa_rx_desc_init(struct qcom_ethqos *ethqos, - unsigned int QINX) + enum ipa_queue_type type) { int cur_rx = 0; struct dma_desc *RX_NORMAL_DESC; @@ -538,17 +917,20 @@ static void ethqos_ipa_rx_desc_init(struct qcom_ethqos *ethqos, int start_index = cur_rx; int last_index; unsigned int VARRDES3 = 0; + unsigned int qinx = eth_ipa_queue_type_to_rx_queue(type); ETHQOSDBG("\n"); - /* initialize all desc */ + if (!eth_ipa_ctx.rx_queue[type]->desc_cnt) + return; - for (i = 0; i < eth_ipa_ctx.rx_queue->desc_cnt; i++) { - RX_NORMAL_DESC = eth_ipa_ctx.rx_queue->rx_desc_ptrs[i]; + /* initialize all desc */ + for (i = 0; i < eth_ipa_ctx.rx_queue[type]->desc_cnt; i++) { + RX_NORMAL_DESC = eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs[i]; memset(RX_NORMAL_DESC, 0, sizeof(struct dma_desc)); /* update buffer 1 address pointer */ RX_NORMAL_DESC->des0 - = cpu_to_le32(eth_ipa_ctx.rx_queue->skb_dma[i]); + = cpu_to_le32(eth_ipa_ctx.rx_queue[type]->skb_dma[i]); /* set to zero */ RX_NORMAL_DESC->des1 = 0; @@ -560,27 +942,36 @@ static void ethqos_ipa_rx_desc_init(struct qcom_ethqos *ethqos, /* Don't Set the IOC bit for IPA controlled Desc */ VARRDES3 = le32_to_cpu(RX_NORMAL_DESC->des3); - /* reset IOC for all buffers */ - RX_NORMAL_DESC->des3 = cpu_to_le32(VARRDES3 & ~(1 << 30)); + + /* reset IOC as per rx intr moderation count */ + if (!eth_ipa_ctx.rx_intr_mod_cnt[type] || + (eth_ipa_ctx.rx_intr_mod_cnt[type] && + (i % eth_ipa_ctx.rx_intr_mod_cnt[type]))) + RX_NORMAL_DESC->des3 = cpu_to_le32(VARRDES3 & + ~(1 << 30)); } /* update the total no of Rx descriptors count */ - DMA_RDRLR_RGWR(QINX, (eth_ipa_ctx.rx_queue->desc_cnt - 1)); + DMA_RDRLR_RGWR(qinx, (eth_ipa_ctx.rx_queue[type]->desc_cnt - 1)); /* update the Rx Descriptor Tail Pointer */ - last_index - = GET_RX_CURRENT_RCVD_LAST_DESC_INDEX(start_index, 0, - eth_ipa_ctx.rx_queue->desc_cnt); - DMA_RDTP_RPDR_RGWR(QINX, - eth_ipa_ctx.rx_queue->rx_desc_dma_addrs[last_index]); + last_index = + GET_RX_CURRENT_RCVD_LAST_DESC_INDEX( + start_index, 0, + eth_ipa_ctx.rx_queue[type]->desc_cnt); + DMA_RDTP_RPDR_RGWR( + qinx, + eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs[last_index]); /* update the starting address of desc chain/ring */ - DMA_RDLAR_RGWR(QINX, - eth_ipa_ctx.rx_queue->rx_desc_dma_addrs[start_index]); + DMA_RDLAR_RGWR( + qinx, + eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs[start_index]); ETHQOSDBG("\n"); } -static int ethqos_alloc_ipa_rx_buf(struct qcom_ethqos *ethqos, - unsigned int i, gfp_t gfp) +static int ethqos_alloc_ipa_rx_buf( + struct qcom_ethqos *ethqos, unsigned int i, gfp_t gfp, + enum ipa_queue_type type) { unsigned int rx_buffer_len; dma_addr_t ipa_rx_buf_dma_addr; @@ -592,27 +983,28 @@ static int 
ethqos_alloc_ipa_rx_buf(struct qcom_ethqos *ethqos, ETHQOSDBG("\n"); - rx_buffer_len = ETHQOS_ETH_FRAME_LEN_IPA; - eth_ipa_ctx.rx_queue->ipa_buff_va[i] = dma_alloc_coherent( + rx_buffer_len = eth_ipa_queue_type_to_buf_length(type); + eth_ipa_ctx.rx_queue[type]->ipa_buff_va[i] = dma_alloc_coherent( GET_MEM_PDEV_DEV, rx_buffer_len, &ipa_rx_buf_dma_addr, GFP_KERNEL); - if (!eth_ipa_ctx.rx_queue->ipa_buff_va[i]) { + if (!eth_ipa_ctx.rx_queue[type]->ipa_buff_va[i]) { dev_alert(ðqos->pdev->dev, "Failed to allocate RX dma buf for IPA\n"); return -ENOMEM; } - eth_ipa_ctx.rx_queue->skb_dma[i] = ipa_rx_buf_dma_addr; + eth_ipa_ctx.rx_queue[type]->skb_dma[i] = ipa_rx_buf_dma_addr; buff_sgt = kzalloc(sizeof(*buff_sgt), GFP_KERNEL); if (buff_sgt) { - ret = dma_get_sgtable(GET_MEM_PDEV_DEV, buff_sgt, - eth_ipa_ctx.rx_queue->ipa_buff_va[i], - ipa_rx_buf_dma_addr, - rx_buffer_len); + ret = dma_get_sgtable( + GET_MEM_PDEV_DEV, buff_sgt, + eth_ipa_ctx.rx_queue[type]->ipa_buff_va[i], + ipa_rx_buf_dma_addr, + rx_buffer_len); if (ret == 0) { - eth_ipa_ctx.rx_queue->ipa_rx_buff_phy_addr[i] + eth_ipa_ctx.rx_queue[type]->ipa_rx_buff_phy_addr[i] = sg_phys(buff_sgt->sgl); sg_free_table(buff_sgt); } else { @@ -629,11 +1021,11 @@ static int ethqos_alloc_ipa_rx_buf(struct qcom_ethqos *ethqos, static void ethqos_wrapper_tx_descriptor_init_single_q( struct qcom_ethqos *ethqos, - unsigned int qinx) + enum ipa_queue_type type) { int i; - struct dma_desc *desc = eth_ipa_ctx.tx_queue->tx_desc_ptrs[0]; - dma_addr_t desc_dma = eth_ipa_ctx.tx_queue->tx_desc_dma_addrs[0]; + struct dma_desc *desc = eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs[0]; + dma_addr_t desc_dma = eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs[0]; void *ipa_tx_buf_vaddr; dma_addr_t ipa_tx_buf_dma_addr; struct sg_table *buff_sgt; @@ -641,51 +1033,53 @@ static void ethqos_wrapper_tx_descriptor_init_single_q( struct platform_device *pdev = ethqos->pdev; struct net_device *dev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(dev); - + unsigned int qinx = eth_ipa_queue_type_to_tx_queue(type); ETHQOSDBG("qinx = %u\n", qinx); /* Allocate TX Buffer Pool Structure */ - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base = + eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base = dma_zalloc_coherent (GET_MEM_PDEV_DEV, sizeof(dma_addr_t) * - eth_ipa_ctx.tx_queue->desc_cnt, - ð_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base_dmahndl, + eth_ipa_ctx.tx_queue[type]->desc_cnt, + ð_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base_dmahndl, GFP_KERNEL); - if (!eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base) { + if (!eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base) { ETHQOSERR("ERROR: Unable to allocate IPA TX Buff structure\n"); return; } ETHQOSDBG("IPA tx_dma_buff_addrs = %pK\n", - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base); + eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base); - for (i = 0; i < eth_ipa_ctx.tx_queue->desc_cnt; i++) { - eth_ipa_ctx.tx_queue->tx_desc_ptrs[i] = &desc[i]; - eth_ipa_ctx.tx_queue->tx_desc_dma_addrs[i] = + for (i = 0; i < eth_ipa_ctx.tx_queue[type]->desc_cnt; i++) { + eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs[i] = &desc[i]; + eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs[i] = (desc_dma + sizeof(struct dma_desc) * i); /* Create a memory pool for TX offload path */ - /* Currently only IPA_DMA_TX_CH is supported */ ipa_tx_buf_vaddr - = dma_alloc_coherent(GET_MEM_PDEV_DEV, ETHQOS_ETH_FRAME_LEN_IPA, - &ipa_tx_buf_dma_addr, GFP_KERNEL); + = dma_alloc_coherent( + GET_MEM_PDEV_DEV, + eth_ipa_queue_type_to_buf_length(type), + 
&ipa_tx_buf_dma_addr, GFP_KERNEL); if (!ipa_tx_buf_vaddr) { ETHQOSERR("Failed to allocate TX buf for IPA\n"); return; } - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_va_addrs_base[i] + eth_ipa_ctx.tx_queue[type]->ipa_tx_buff_pool_va_addrs_base[i] = ipa_tx_buf_vaddr; - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base[i] + eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base[i] = ipa_tx_buf_dma_addr; buff_sgt = kzalloc(sizeof(*buff_sgt), GFP_KERNEL); if (buff_sgt) { - ret = dma_get_sgtable(GET_MEM_PDEV_DEV, buff_sgt, - ipa_tx_buf_vaddr, - ipa_tx_buf_dma_addr, - ETHQOS_ETH_FRAME_LEN_IPA); + ret = dma_get_sgtable( + GET_MEM_PDEV_DEV, buff_sgt, + ipa_tx_buf_vaddr, + ipa_tx_buf_dma_addr, + eth_ipa_queue_type_to_buf_length(type)); if (ret == 0) { - eth_ipa_ctx.tx_queue->ipa_tx_buff_phy_addr[i] = - sg_phys(buff_sgt->sgl); + eth_ipa_ctx.tx_queue[type]->ipa_tx_phy_addr[i] + = sg_phys(buff_sgt->sgl); sg_free_table(buff_sgt); } else { ETHQOSERR("Failed to get sg RX Table\n"); @@ -696,74 +1090,77 @@ static void ethqos_wrapper_tx_descriptor_init_single_q( ETHQOSERR("Failed to get RX sg buffer.\n"); } } - if (ethqos->ipa_enabled && qinx == IPA_DMA_TX_CH) + if (ethqos->ipa_enabled && + qinx == eth_ipa_queue_type_to_tx_queue(type)) ETHQOSDBG("DMA MAPed the virtual address for %d descs\n", - eth_ipa_ctx.tx_queue[IPA_DMA_TX_CH].desc_cnt); + eth_ipa_ctx.tx_queue[type]->desc_cnt); - ethqos_ipa_tx_desc_init(ethqos, qinx); + ethqos_ipa_tx_desc_init(ethqos, type); } static void ethqos_wrapper_rx_descriptor_init_single_q( struct qcom_ethqos *ethqos, - unsigned int qinx) + enum ipa_queue_type type) { int i; - struct dma_desc *desc = eth_ipa_ctx.rx_queue->rx_desc_ptrs[0]; - dma_addr_t desc_dma = eth_ipa_ctx.rx_queue->rx_desc_dma_addrs[0]; + struct dma_desc *desc = eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs[0]; + dma_addr_t desc_dma = eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs[0]; struct platform_device *pdev = ethqos->pdev; struct net_device *dev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(dev); + unsigned int qinx = eth_ipa_queue_type_to_rx_queue(type); ETHQOSDBG("\n"); /* Allocate RX Buffer Pool Structure */ - if (!eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base) { - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base - = dma_zalloc_coherent + if (!eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base) { + eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base = + dma_zalloc_coherent (GET_MEM_PDEV_DEV, sizeof(dma_addr_t) * - eth_ipa_ctx.rx_queue->desc_cnt, - ð_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base_dmahndl, + eth_ipa_ctx.rx_queue[type]->desc_cnt, + ð_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base_dmahndl, GFP_KERNEL); - if (!eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base) { + if (!eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base) { ETHQOSERR("Unable to allocate structure\n"); return; } - ETHQOSDBG + ETHQOSERR ("IPA rx_buff_addrs %pK\n", - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base); + eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base); } - for (i = 0; i < eth_ipa_ctx.rx_queue[qinx].desc_cnt; i++) { - eth_ipa_ctx.rx_queue->rx_desc_ptrs[i] = &desc[i]; - eth_ipa_ctx.rx_queue->rx_desc_dma_addrs[i] = + for (i = 0; i < eth_ipa_ctx.rx_queue[type]->desc_cnt; i++) { + eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs[i] = &desc[i]; + eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs[i] = (desc_dma + sizeof(struct dma_desc) * i); /* allocate skb & assign to each desc */ - if (ethqos_alloc_ipa_rx_buf(ethqos, i, GFP_KERNEL)) + if (ethqos_alloc_ipa_rx_buf(ethqos, i, GFP_KERNEL, type)) break; /* Assign 
the RX memory pool for offload data path */ - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_va_addrs_base[i] - = eth_ipa_ctx.rx_queue->ipa_buff_va[i]; - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base[i] - = eth_ipa_ctx.rx_queue->skb_dma[i]; + eth_ipa_ctx.rx_queue[type]->ipa_rx_buff_pool_va_addrs_base[i] + = eth_ipa_ctx.rx_queue[type]->ipa_buff_va[i]; + eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base[i] + = eth_ipa_ctx.rx_queue[type]->skb_dma[i]; /* alloc_rx_buf */ wmb(); } - ETHQOSDBG("Allocated %d buffers for RX Channel: %d\n", - eth_ipa_ctx.rx_queue[qinx].desc_cnt, qinx); - if (ethqos->ipa_enabled && qinx == IPA_DMA_RX_CH) - ETHQOSDBG("virtual memory pool address for RX for %d desc\n", - eth_ipa_ctx.rx_queue[IPA_DMA_RX_CH].desc_cnt); + ETHQOSDBG( + "Allocated %d buffers for RX Channel: %d\n", + eth_ipa_ctx.rx_queue[type]->desc_cnt, qinx); + ETHQOSDBG( + "virtual memory pool address for RX for %d desc\n", + eth_ipa_ctx.rx_queue[type]->desc_cnt); - ethqos_ipa_rx_desc_init(ethqos, qinx); + ethqos_ipa_rx_desc_init(ethqos, type); } static void ethqos_rx_skb_free_mem(struct qcom_ethqos *ethqos, - unsigned int qinx) + enum ipa_queue_type type) { struct net_device *ndev; struct stmmac_priv *priv; @@ -777,29 +1174,36 @@ static void ethqos_rx_skb_free_mem(struct qcom_ethqos *ethqos, ndev = dev_get_drvdata(ðqos->pdev->dev); priv = netdev_priv(ndev); - for (i = 0; i < eth_ipa_ctx.rx_queue[qinx].desc_cnt; i++) { + for (i = 0; i < eth_ipa_ctx.rx_queue[type]->desc_cnt; i++) { dma_free_coherent (GET_MEM_PDEV_DEV, - ETHQOS_ETH_FRAME_LEN_IPA, - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_va_addrs_base[i], - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base[i]); + eth_ipa_queue_type_to_buf_length(type), + eth_ipa_ctx.rx_queue[type]->ipa_rx_buff_pool_va_addrs_base[i], + eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base[i]); } } static void ethqos_free_ipa_queue_mem(struct qcom_ethqos *ethqos) { - ethqos_rx_desc_free_mem(ethqos, IPA_DMA_RX_CH); - ethqos_tx_desc_free_mem(ethqos, IPA_DMA_TX_CH); - ethqos_rx_skb_free_mem(ethqos, IPA_DMA_RX_CH); - ethqos_rx_buf_free_mem(ethqos, IPA_DMA_RX_CH); - ethqos_tx_buf_free_mem(ethqos, IPA_DMA_TX_CH); - ethqos_free_ipa_rx_queue_struct(ethqos); - ethqos_free_ipa_tx_queue_struct(ethqos); + int type; + + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + ethqos_rx_desc_free_mem(ethqos, type); + ethqos_tx_desc_free_mem(ethqos, type); + ethqos_rx_skb_free_mem(ethqos, type); + ethqos_rx_buf_free_mem(ethqos, type); + ethqos_tx_buf_free_mem(ethqos, type); + ethqos_free_ipa_rx_queue_struct(ethqos, type); + ethqos_free_ipa_tx_queue_struct(ethqos, type); + } + } } static int ethqos_set_ul_dl_smmu_ipa_params(struct qcom_ethqos *ethqos, struct ipa_ntn_setup_info *ul, - struct ipa_ntn_setup_info *dl) + struct ipa_ntn_setup_info *dl, + enum ipa_queue_type type) { int ret = 0; struct platform_device *pdev; @@ -829,10 +1233,10 @@ static int ethqos_set_ul_dl_smmu_ipa_params(struct qcom_ethqos *ethqos, } ret = dma_get_sgtable(GET_MEM_PDEV_DEV, ul->ring_base_sgt, - eth_ipa_ctx.rx_queue->rx_desc_ptrs[0], - eth_ipa_ctx.rx_queue->rx_desc_dma_addrs[0], + eth_ipa_ctx.rx_queue[type]->rx_desc_ptrs[0], + eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs[0], (sizeof(struct dma_desc) * - eth_ipa_ctx.rx_queue->desc_cnt)); + eth_ipa_ctx.rx_queue[type]->desc_cnt)); if (ret) { ETHQOSERR("Failed to get IPA UL ring sgtable.\n"); kfree(ul->ring_base_sgt); @@ -851,11 +1255,11 @@ static int ethqos_set_ul_dl_smmu_ipa_params(struct qcom_ethqos *ethqos, } ret = dma_get_sgtable - 
(GET_MEM_PDEV_DEV, ul->buff_pool_base_sgt, - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base, - eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base_dmahndl, - (sizeof(struct dma_desc) * - eth_ipa_ctx.rx_queue->desc_cnt)); + (GET_MEM_PDEV_DEV, ul->buff_pool_base_sgt, + eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base, + eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base_dmahndl, + (sizeof(struct dma_desc) * + eth_ipa_ctx.rx_queue[type]->desc_cnt)); if (ret) { ETHQOSERR("Failed to get IPA UL buff pool sgtable.\n"); kfree(ul->buff_pool_base_sgt); @@ -874,10 +1278,10 @@ static int ethqos_set_ul_dl_smmu_ipa_params(struct qcom_ethqos *ethqos, } ret = dma_get_sgtable(GET_MEM_PDEV_DEV, dl->ring_base_sgt, - eth_ipa_ctx.tx_queue->tx_desc_ptrs[0], - eth_ipa_ctx.tx_queue->tx_desc_dma_addrs[0], + eth_ipa_ctx.tx_queue[type]->tx_desc_ptrs[0], + eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs[0], (sizeof(struct dma_desc) * - eth_ipa_ctx.tx_queue->desc_cnt)); + eth_ipa_ctx.tx_queue[type]->desc_cnt)); if (ret) { ETHQOSERR("Failed to get IPA DL ring sgtable.\n"); kfree(dl->ring_base_sgt); @@ -895,11 +1299,11 @@ static int ethqos_set_ul_dl_smmu_ipa_params(struct qcom_ethqos *ethqos, return -ENOMEM; } ret = dma_get_sgtable - (GET_MEM_PDEV_DEV, dl->buff_pool_base_sgt, - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base, - eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base_dmahndl, - (sizeof(struct dma_desc) * - eth_ipa_ctx.tx_queue[IPA_DMA_TX_CH].desc_cnt)); + (GET_MEM_PDEV_DEV, dl->buff_pool_base_sgt, + eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base, + eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base_dmahndl, + (sizeof(struct dma_desc) * + eth_ipa_ctx.tx_queue[type]->desc_cnt)); if (ret) { ETHQOSERR("Failed to get IPA DL buff pool sgtable.\n"); kfree(dl->buff_pool_base_sgt); @@ -913,8 +1317,9 @@ static int ethqos_set_ul_dl_smmu_ipa_params(struct qcom_ethqos *ethqos, return ret; } -static int enable_tx_dma_interrupts(unsigned int QINX, - struct qcom_ethqos *ethqos) +static int enable_tx_dma_interrupts( + unsigned int QINX, struct qcom_ethqos *ethqos, + bool enable_sw_intr) { unsigned int tmp; unsigned long VARDMA_SR; @@ -927,7 +1332,7 @@ static int enable_tx_dma_interrupts(unsigned int QINX, tmp = VARDMA_SR & DMA_TX_INT_MASK; DMA_SR_RGWR(QINX, tmp); - /* Enable following interrupts for Queue 0 */ + /* Enable following interrupts for Queue */ /* NIE - Normal Interrupt Summary Enable */ /* AIE - Abnormal Interrupt Summary Enable */ /* FBE - Fatal Bus Error Enable */ @@ -938,13 +1343,17 @@ static int enable_tx_dma_interrupts(unsigned int QINX, VARDMA_IER = VARDMA_IER | ((0x1) << 12) | ((0x1) << 14) | ((0x1) << 15); + if (enable_sw_intr) + VARDMA_IER |= ((0x1) << 0); + DMA_IER_RGWR(QINX, VARDMA_IER); return 0; } -static int enable_rx_dma_interrupts(unsigned int QINX, - struct qcom_ethqos *ethqos) +static int enable_rx_dma_interrupts( + unsigned int QINX, struct qcom_ethqos *ethqos, + bool enable_sw_intr) { unsigned int tmp; unsigned long VARDMA_SR; @@ -969,6 +1378,9 @@ static int enable_rx_dma_interrupts(unsigned int QINX, VARDMA_IER = VARDMA_IER | ((0x1) << 14) | ((0x1) << 12) | ((0x1) << 15); + if (enable_sw_intr) + VARDMA_IER |= ((0x1) << 6); + DMA_IER_RGWR(QINX, VARDMA_IER); return 0; @@ -976,68 +1388,93 @@ static int enable_rx_dma_interrupts(unsigned int QINX, static void ethqos_ipa_config_queues(struct qcom_ethqos *ethqos) { - ethqos_alloc_ipa_tx_queue_struct(ethqos); - ethqos_alloc_ipa_rx_queue_struct(ethqos); - allocate_ipa_buffer_and_desc(ethqos); - 
ethqos_wrapper_tx_descriptor_init_single_q(ethqos, 0); - ethqos_wrapper_rx_descriptor_init_single_q(ethqos, 0); + int type; + + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + ethqos_alloc_ipa_tx_queue_struct(ethqos, type); + ethqos_alloc_ipa_rx_queue_struct(ethqos, type); + allocate_ipa_buffer_and_desc(ethqos, type); + ethqos_wrapper_tx_descriptor_init_single_q( + ethqos, type); + ethqos_wrapper_rx_descriptor_init_single_q( + ethqos, type); + } + } } -static void ethqos_configure_ipa_tx_dma_channel(unsigned int QINX, - struct qcom_ethqos *ethqos) +static void ethqos_configure_ipa_tx_dma_channel( + struct qcom_ethqos *ethqos, enum ipa_queue_type type) { struct platform_device *pdev = ethqos->pdev; struct net_device *dev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(dev); + unsigned int qinx = eth_ipa_queue_type_to_tx_queue(type); ETHQOSDBG("\n"); - enable_tx_dma_interrupts(QINX, ethqos); + enable_tx_dma_interrupts( + qinx, ethqos, + eth_ipa_queue_type_to_tx_intr_route(type) == IPA_INTR_ROUTE_DB); /* Enable Operate on Second Packet for better tputs */ - DMA_TCR_OSP_UDFWR(QINX, 0x1); + DMA_TCR_OSP_UDFWR(qinx, 0x1); /* start TX DMA */ - priv->hw->dma->start_tx(priv->ioaddr, IPA_DMA_TX_CH); + priv->hw->dma->start_tx(priv->ioaddr, qinx); ETHQOSDBG("\n"); } -static void ethqos_configure_ipa_rx_dma_channel(unsigned int QINX, - struct qcom_ethqos *ethqos) +static void ethqos_configure_ipa_rx_dma_channel( + struct qcom_ethqos *ethqos, enum ipa_queue_type type) { struct platform_device *pdev = ethqos->pdev; struct net_device *dev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(dev); + unsigned int qinx = eth_ipa_queue_type_to_rx_queue(type); - ETHQOSDBG("\n"); - /*Select Rx Buffer size = 2048bytes */ + ETHQOSERR("\n"); - DMA_RCR_RBSZ_UDFWR(QINX, ETHQOS_ETH_FRAME_LEN_IPA); + /* Select Rx Buffer size */ + DMA_RCR_RBSZ_UDFWR(qinx, eth_ipa_queue_type_to_buf_length(type)); - enable_rx_dma_interrupts(QINX, ethqos); + enable_rx_dma_interrupts( + qinx, + ethqos, + eth_ipa_queue_type_to_rx_intr_route(type) == IPA_INTR_ROUTE_DB); /* start RX DMA */ - priv->hw->dma->start_rx(priv->ioaddr, IPA_DMA_RX_CH); + priv->hw->dma->start_rx(priv->ioaddr, qinx); - ETHQOSDBG("\n"); + ETHQOSERR("\n"); } -static int ethqos_init_offload(struct qcom_ethqos *ethqos) +static int ethqos_init_offload(struct qcom_ethqos *ethqos, + enum ipa_queue_type type) { struct stmmac_priv *priv = ethqos_get_priv(ethqos); ETHQOSDBG("\n"); - ethqos_configure_ipa_tx_dma_channel(IPA_DMA_TX_CH, ethqos); - priv->hw->mac->map_mtl_to_dma(priv->hw, 0, 0); - ethqos_configure_ipa_rx_dma_channel(IPA_DMA_RX_CH, ethqos); + ethqos_configure_ipa_tx_dma_channel(ethqos, type); + priv->hw->mac->map_mtl_to_dma( + priv->hw, + eth_ipa_queue_type_to_rx_queue(type), + eth_ipa_queue_type_to_rx_queue(type)); + ethqos_configure_ipa_rx_dma_channel(ethqos, type); ETHQOSDBG("\n"); return 0; } -static void ntn_ipa_notify_cb(void *priv, enum ipa_dp_evt_type evt, - unsigned long data) +static void ntn_ipa_notify_cb_cv2x( + void *priv, enum ipa_dp_evt_type evt, unsigned long data) +{ + WARN_ON(1); +} + +static void ntn_ipa_notify_cb_be( + void *priv, enum ipa_dp_evt_type evt, unsigned long data) { struct qcom_ethqos *ethqos = eth_ipa_ctx.ethqos; struct ethqos_prv_ipa_data *eth_ipa = ð_ipa_ctx; @@ -1071,7 +1508,9 @@ static void ntn_ipa_notify_cb(void *priv, enum ipa_dp_evt_type evt, if (evt == IPA_RECEIVE) { /*Exception packets to network stack*/ skb->dev = dev; - skb_record_rx_queue(skb, 
IPA_DMA_RX_CH); + skb_record_rx_queue( + skb, + eth_ipa_queue_type_to_rx_queue(IPA_QUEUE_BE)); if (true == *(u8 *)(skb->cb + 4)) { skb->protocol = htons(ETH_P_IP); @@ -1098,7 +1537,7 @@ static void ntn_ipa_notify_cb(void *priv, enum ipa_dp_evt_type evt, dev->stats.rx_dropped++; } else { /* Update Statistics */ - eth_ipa_ctx.ipa_stats.ipa_ul_exception++; + eth_ipa_ctx.ipa_stats[IPA_QUEUE_BE].ipa_ul_exception++; dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; } @@ -1110,7 +1549,8 @@ static void ntn_ipa_notify_cb(void *priv, enum ipa_dp_evt_type evt, } } -static int ethqos_ipa_offload_init(struct qcom_ethqos *pdata) +static int ethqos_ipa_offload_init( + struct qcom_ethqos *pdata, enum ipa_queue_type type) { struct ipa_uc_offload_intf_params in; struct ipa_uc_offload_out_params out; @@ -1123,7 +1563,6 @@ static int ethqos_ipa_offload_init(struct qcom_ethqos *pdata) #endif bool ipa_vlan_mode; int ret; - struct net_device *ndev = dev_get_drvdata(&pdata->pdev->dev); if (!pdata) { ETHQOSERR("Null Param\n"); @@ -1131,12 +1570,7 @@ static int ethqos_ipa_offload_init(struct qcom_ethqos *pdata) return ret; } - ret = ipa_is_vlan_mode(IPA_VLAN_IF_EMAC, &ipa_vlan_mode); - if (ret) { - ETHQOSERR("Could not read ipa_vlan_mode\n"); - /* In case of failure, fallback to non vlan mode */ - ipa_vlan_mode = 0; - } + ipa_vlan_mode = eth_ipa_queue_type_to_ipa_vlan_mode(type); ETHQOSDBG("IPA VLAN mode %d\n", ipa_vlan_mode); @@ -1147,9 +1581,13 @@ static int ethqos_ipa_offload_init(struct qcom_ethqos *pdata) if (!eth_ipa_ctx.vlan_id || !ipa_vlan_mode) { memset(ð_l2_hdr_v4, 0, sizeof(eth_l2_hdr_v4)); memset(ð_l2_hdr_v6, 0, sizeof(eth_l2_hdr_v6)); - memcpy(ð_l2_hdr_v4.h_source, ndev->dev_addr, ETH_ALEN); + memcpy( + ð_l2_hdr_v4.h_source, + eth_ipa_queue_type_to_device_addr(type), ETH_ALEN); eth_l2_hdr_v4.h_proto = htons(ETH_P_IP); - memcpy(ð_l2_hdr_v6.h_source, ndev->dev_addr, ETH_ALEN); + memcpy( + ð_l2_hdr_v6.h_source, + eth_ipa_queue_type_to_device_addr(type), ETH_ALEN); eth_l2_hdr_v6.h_proto = htons(ETH_P_IPV6); in.hdr_info[0].hdr = (u8 *)ð_l2_hdr_v4; in.hdr_info[0].hdr_len = ETH_HLEN; @@ -1163,13 +1601,17 @@ static int ethqos_ipa_offload_init(struct qcom_ethqos *pdata) if (ipa_vlan_mode) { memset(ð_vlan_hdr_v4, 0, sizeof(eth_vlan_hdr_v4)); memset(ð_vlan_hdr_v6, 0, sizeof(eth_vlan_hdr_v6)); - memcpy(ð_vlan_hdr_v4.h_source, ndev->dev_addr, ETH_ALEN); + memcpy( + ð_vlan_hdr_v4.h_source, + eth_ipa_queue_type_to_device_addr(type), ETH_ALEN); eth_vlan_hdr_v4.h_vlan_proto = htons(ETH_P_8021Q); eth_vlan_hdr_v4.h_vlan_encapsulated_proto = htons(ETH_P_IP); in.hdr_info[0].hdr = (u8 *)ð_vlan_hdr_v4; in.hdr_info[0].hdr_len = VLAN_ETH_HLEN; in.hdr_info[0].hdr_type = IPA_HDR_L2_802_1Q; - memcpy(ð_vlan_hdr_v6.h_source, ndev->dev_addr, ETH_ALEN); + memcpy( + ð_vlan_hdr_v6.h_source, + eth_ipa_queue_type_to_device_addr(type), ETH_ALEN); eth_vlan_hdr_v6.h_vlan_proto = htons(ETH_P_8021Q); eth_vlan_hdr_v6.h_vlan_encapsulated_proto = htons(ETH_P_IPV6); in.hdr_info[1].hdr = (u8 *)ð_vlan_hdr_v6; @@ -1179,10 +1621,10 @@ static int ethqos_ipa_offload_init(struct qcom_ethqos *pdata) #endif /* Building IN params */ - in.netdev_name = ndev->name; + in.netdev_name = eth_ipa_queue_type_to_device_name(type); in.priv = pdata; - in.notify = ntn_ipa_notify_cb; - in.proto = IPA_UC_NTN; + in.notify = eth_ipa_queue_type_to_ipa_notify_cb(type); + in.proto = eth_ipa_queue_type_to_proto(type); in.hdr_info[0].dst_mac_addr_offset = 0; in.hdr_info[1].dst_mac_addr_offset = 0; @@ -1193,40 +1635,42 @@ static int ethqos_ipa_offload_init(struct 
qcom_ethqos *pdata) ret = -1; return ret; } - eth_ipa->ipa_client_hndl = out.clnt_hndl; + eth_ipa->ipa_client_hndl[type] = out.clnt_hndl; ETHQOSDBG("Recevied IPA Offload Client Handle %d", - eth_ipa->ipa_client_hndl); + eth_ipa->ipa_client_hndl[type]); - pdata->ipa_enabled = true; return 0; } -static int ethqos_ipa_offload_cleanup(struct qcom_ethqos *ethqos) +static int ethqos_ipa_offload_cleanup( + struct qcom_ethqos *ethqos, enum ipa_queue_type type) { struct ethqos_prv_ipa_data *eth_ipa = &eth_ipa_ctx; int ret = 0; - ETHQOSDBG("begin\n"); + ETHQOSERR("begin\n"); if (!ethqos) { ETHQOSERR("Null Param\n"); return -ENOMEM; } - if (!eth_ipa->ipa_client_hndl) { + if (!eth_ipa->ipa_client_hndl[type]) { ETHQOSERR("cleanup called with NULL IPA client handle\n"); return -ENOMEM; } - ret = ipa_uc_offload_cleanup(eth_ipa->ipa_client_hndl); + ret = ipa_uc_offload_cleanup(eth_ipa->ipa_client_hndl[type]); if (ret) { ETHQOSERR("Could not cleanup IPA Offload ret %d\n", ret); ret = -1; } else { - eth_ipa_send_msg(ethqos, IPA_PERIPHERAL_DISCONNECT); + if (eth_ipa_queue_type_to_send_msg_needed(type)) + eth_ipa_send_msg( + ethqos, IPA_PERIPHERAL_DISCONNECT, type); } - ETHQOSDBG("end\n"); + ETHQOSERR("end\n"); return ret; } @@ -1329,7 +1773,8 @@ static ssize_t read_ipa_stats(struct file *file, "=================================================="); len += scnprintf(buf + len, buf_len - len, "%-25s %10llu\n", - "IPA RX Packets: ", eth_ipa_ctx.ipa_stats.ipa_ul_exception); + "IPA RX Packets: ", + eth_ipa_ctx.ipa_stats[IPA_QUEUE_BE].ipa_ul_exception); len += scnprintf(buf + len, buf_len - len, "\n"); if (len > buf_len) @@ -1340,34 +1785,35 @@ static ssize_t read_ipa_stats(struct file *file, return ret_cnt; } -static void ethqos_ipa_stats_read(struct qcom_ethqos *ethqos) +static void ethqos_ipa_stats_read(struct qcom_ethqos *ethqos, + enum ipa_queue_type type) { - struct ethqos_ipa_stats *dma_stats = &eth_ipa_ctx.ipa_stats; + struct ethqos_ipa_stats *dma_stats = &eth_ipa_ctx.ipa_stats[type]; unsigned int data; - if (!eth_ipa_ctx.rx_queue || !eth_ipa_ctx.tx_queue) + if (!eth_ipa_ctx.rx_queue[type] || !eth_ipa_ctx.tx_queue[type]) return; dma_stats->ipa_rx_desc_ring_base - = eth_ipa_ctx.rx_queue->rx_desc_dma_addrs[0]; + = eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs[0]; dma_stats->ipa_rx_desc_ring_size - = eth_ipa_ctx.rx_queue[IPA_DMA_RX_CH].desc_cnt; + = eth_ipa_ctx.rx_queue[type]->desc_cnt; dma_stats->ipa_rx_buff_ring_base - = eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base_dmahndl; + = eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base_dmahndl; dma_stats->ipa_rx_buff_ring_size - = eth_ipa_ctx.rx_queue[IPA_DMA_RX_CH].desc_cnt - 1; + = eth_ipa_ctx.rx_queue[type]->desc_cnt - 1; - //@RK: IPA_INTEG Need Rx db received cnt from IPA uC dma_stats->ipa_rx_db_int_raised = 0; - DMA_CHRDR_RGRD(IPA_DMA_RX_CH, data); - dma_stats->ipa_rx_cur_desc_ptr_indx = GET_RX_DESC_IDX(IPA_DMA_RX_CH, - data); + DMA_CHRDR_RGRD(eth_ipa_queue_type_to_rx_queue(type), data); + dma_stats->ipa_rx_cur_desc_ptr_indx = GET_RX_DESC_IDX( + type, data); - DMA_RDTP_RPDR_RGRD(IPA_DMA_RX_CH, data); - dma_stats->ipa_rx_tail_ptr_indx = GET_RX_DESC_IDX(IPA_DMA_RX_CH, data); + DMA_RDTP_RPDR_RGRD(eth_ipa_queue_type_to_rx_queue(type), data); + dma_stats->ipa_rx_tail_ptr_indx = GET_RX_DESC_IDX( + type, data); - DMA_SR_RGRD(IPA_DMA_RX_CH, data); + DMA_SR_RGRD(eth_ipa_queue_type_to_rx_queue(type), data); dma_stats->ipa_rx_dma_status = data; dma_stats->ipa_rx_dma_ch_underflow = @@ -1379,32 +1825,37 @@ static void ethqos_ipa_stats_read(struct qcom_ethqos *ethqos) 
dma_stats->ipa_rx_dma_ch_complete = GET_VALUE(data, DMA_SR_RI_LPOS, DMA_SR_RI_LPOS, DMA_SR_RI_HPOS); - DMA_IER_RGRD(IPA_DMA_RX_CH, dma_stats->ipa_rx_int_mask); + DMA_IER_RGRD( + eth_ipa_queue_type_to_rx_queue(type), + dma_stats->ipa_rx_int_mask); - DMA_IER_RBUE_UDFRD(IPA_DMA_RX_CH, dma_stats->ipa_rx_underflow_irq); - DMA_IER_ETIE_UDFRD(IPA_DMA_RX_CH, - dma_stats->ipa_rx_early_trans_comp_irq); + DMA_IER_RBUE_UDFRD( + eth_ipa_queue_type_to_rx_queue(type), + dma_stats->ipa_rx_underflow_irq); + DMA_IER_ETIE_UDFRD( + eth_ipa_queue_type_to_rx_queue(type), + dma_stats->ipa_rx_early_trans_comp_irq); dma_stats->ipa_tx_desc_ring_base - = eth_ipa_ctx.tx_queue->tx_desc_dma_addrs[0]; + = eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs[0]; dma_stats->ipa_tx_desc_ring_size - = eth_ipa_ctx.tx_queue[IPA_DMA_TX_CH].desc_cnt; + = eth_ipa_ctx.tx_queue[type]->desc_cnt; dma_stats->ipa_tx_buff_ring_base - = eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base_dmahndl; + = eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base_dmahndl; dma_stats->ipa_tx_buff_ring_size - = eth_ipa_ctx.tx_queue[IPA_DMA_TX_CH].desc_cnt - 1; + = eth_ipa_ctx.tx_queue[type]->desc_cnt - 1; - //@RK: IPA_INTEG Need Tx db received cnt from IPA uC dma_stats->ipa_tx_db_int_raised = 0; - DMA_CHTDR_RGRD(IPA_DMA_TX_CH, data); - dma_stats->ipa_tx_curr_desc_ptr_indx = GET_TX_DESC_IDX - (IPA_DMA_TX_CH, data); + DMA_CHTDR_RGRD(eth_ipa_queue_type_to_tx_queue(type), data); + dma_stats->ipa_tx_curr_desc_ptr_indx = + GET_TX_DESC_IDX(type, data); - DMA_TDTP_TPDR_RGRD(IPA_DMA_TX_CH, data); - dma_stats->ipa_tx_tail_ptr_indx = GET_TX_DESC_IDX(IPA_DMA_TX_CH, data); + DMA_TDTP_TPDR_RGRD(eth_ipa_queue_type_to_tx_queue(type), data); + dma_stats->ipa_tx_tail_ptr_indx = + GET_TX_DESC_IDX(type, data); - DMA_SR_RGRD(IPA_DMA_TX_CH, data); + DMA_SR_RGRD(eth_ipa_queue_type_to_tx_queue(type), data); dma_stats->ipa_tx_dma_status = data; dma_stats->ipa_tx_dma_ch_underflow = @@ -1416,20 +1867,32 @@ static void ethqos_ipa_stats_read(struct qcom_ethqos *ethqos) dma_stats->ipa_tx_dma_transfer_complete = GET_VALUE(data, DMA_SR_TI_LPOS, DMA_SR_TI_LPOS, DMA_SR_TI_HPOS); - DMA_IER_RGRD(IPA_DMA_TX_CH, dma_stats->ipa_tx_int_mask); - DMA_IER_TIE_UDFRD(IPA_DMA_TX_CH, - dma_stats->ipa_tx_transfer_complete_irq); + DMA_IER_RGRD( + eth_ipa_queue_type_to_tx_queue(type), + dma_stats->ipa_tx_int_mask); + DMA_IER_TIE_UDFRD( + eth_ipa_queue_type_to_tx_queue(type), + dma_stats->ipa_tx_transfer_complete_irq); + + DMA_IER_TXSE_UDFRD( + eth_ipa_queue_type_to_tx_queue(type), + dma_stats->ipa_tx_transfer_stopped_irq); - DMA_IER_TXSE_UDFRD(IPA_DMA_TX_CH, - dma_stats->ipa_tx_transfer_stopped_irq); + DMA_IER_TBUE_UDFRD( + eth_ipa_queue_type_to_tx_queue(type), + dma_stats->ipa_tx_underflow_irq); - DMA_IER_TBUE_UDFRD(IPA_DMA_TX_CH, - dma_stats->ipa_tx_underflow_irq); + DMA_IER_ETIE_UDFRD( + eth_ipa_queue_type_to_tx_queue(type), + dma_stats->ipa_tx_early_trans_cmp_irq); - DMA_IER_ETIE_UDFRD(IPA_DMA_TX_CH, - dma_stats->ipa_tx_early_trans_cmp_irq); - DMA_IER_FBEE_UDFRD(IPA_DMA_TX_CH, dma_stats->ipa_tx_fatal_err_irq); - DMA_IER_CDEE_UDFRD(IPA_DMA_TX_CH, dma_stats->ipa_tx_desc_err_irq); + DMA_IER_FBEE_UDFRD( + eth_ipa_queue_type_to_tx_queue(type), + dma_stats->ipa_tx_fatal_err_irq); + + DMA_IER_CDEE_UDFRD( + eth_ipa_queue_type_to_tx_queue(type), + dma_stats->ipa_tx_desc_err_irq); } /** @@ -1443,139 +1906,181 @@ static ssize_t read_ntn_dma_stats(struct file *file, { struct qcom_ethqos *ethqos = file->private_data; struct ethqos_prv_ipa_data *eth_ipa = ð_ipa_ctx; - struct ethqos_ipa_stats *dma_stats = 
ð_ipa_ctx.ipa_stats; + struct ethqos_ipa_stats *dma_stats; char *buf; - unsigned int len = 0, buf_len = 3000; + unsigned int len = 0, buf_len = 6000; ssize_t ret_cnt; + int type; buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; - ethqos_ipa_stats_read(ethqos); - - len += scnprintf(buf + len, buf_len - len, "\n\n"); - len += scnprintf(buf + len, buf_len - len, "%25s\n", - "NTN DMA Stats"); - len += scnprintf(buf + len, buf_len - len, "%25s\n\n", - "=================================================="); - - len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", - "RX Desc Ring Base: ", dma_stats->ipa_rx_desc_ring_base); - len += scnprintf(buf + len, buf_len - len, "%-50s %10d\n", - "RX Desc Ring Size: ", dma_stats->ipa_rx_desc_ring_size); - len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", - "RX Buff Ring Base: ", dma_stats->ipa_rx_buff_ring_base); - len += scnprintf(buf + len, buf_len - len, "%-50s %10d\n", - "RX Buff Ring Size: ", dma_stats->ipa_rx_buff_ring_size); - len += scnprintf(buf + len, buf_len - len, "%-50s %10u\n", - "RX Doorbell Interrupts Raised: ", - dma_stats->ipa_rx_db_int_raised); - len += scnprintf(buf + len, buf_len - len, "%-50s %10d\n", - "RX Current Desc Pointer Index: ", - dma_stats->ipa_rx_cur_desc_ptr_indx); - len += scnprintf(buf + len, buf_len - len, "%-50s %10d\n", - "RX Tail Pointer Index: ", - dma_stats->ipa_rx_tail_ptr_indx); - len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", - "RX Doorbell Address: ", eth_ipa->uc_db_rx_addr); - len += scnprintf(buf + len, buf_len - len, "\n"); - - len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", - "RX DMA Status: ", dma_stats->ipa_rx_dma_status); - - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "RX DMA Status - RX DMA Underflow : ", - bit_status_string - [dma_stats->ipa_rx_dma_ch_underflow]); - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "RX DMA Status - RX DMA Stopped : ", - bit_status_string[dma_stats->ipa_rx_dma_ch_stopped]); - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "RX DMA Status - RX DMA Complete : ", - bit_status_string[dma_stats->ipa_rx_dma_ch_complete]); - len += scnprintf(buf + len, buf_len - len, "\n"); - - len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", - "RX DMA CH0 INT Mask: ", dma_stats->ipa_rx_int_mask); - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "RXDMACH0 INTMASK - Transfer Complete IRQ : ", - bit_mask_string - [dma_stats->ipa_rx_transfer_complete_irq]); - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "RXDMACH0 INTMASK - Transfer Stopped IRQ : ", - bit_mask_string - [dma_stats->ipa_rx_transfer_stopped_irq]); - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "RXDMACH0 INTMASK - Underflow IRQ : ", - bit_mask_string[dma_stats->ipa_rx_underflow_irq]); - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "RXDMACH0 INTMASK - Early Transmit Complete IRQ : ", - bit_mask_string - [dma_stats->ipa_rx_early_trans_comp_irq]); - len += scnprintf(buf + len, buf_len - len, "\n"); - - len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", - "TX Desc Ring Base: ", dma_stats->ipa_tx_desc_ring_base); - len += scnprintf(buf + len, buf_len - len, "%-50s %10d\n", - "TX Desc Ring Size: ", dma_stats->ipa_tx_desc_ring_size); - len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", - "TX Buff Ring Base: ", dma_stats->ipa_tx_buff_ring_base); - len += scnprintf(buf + len, buf_len - len, "%-50s %10d\n", - "TX Buff Ring Size: ", dma_stats->ipa_tx_buff_ring_size); - 
len += scnprintf(buf + len, buf_len - len, "%-50s %10u\n", - "TX Doorbell Interrupts Raised: ", - dma_stats->ipa_tx_db_int_raised); - len += scnprintf(buf + len, buf_len - len, "%-50s %10lu\n", - "TX Current Desc Pointer Index: ", - dma_stats->ipa_tx_curr_desc_ptr_indx); - - len += scnprintf(buf + len, buf_len - len, "%-50s %10lu\n", - "TX Tail Pointer Index: ", dma_stats->ipa_tx_tail_ptr_indx); - len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", - "TX Doorbell Address: ", eth_ipa->uc_db_tx_addr); - len += scnprintf(buf + len, buf_len - len, "\n"); - - len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", - "TX DMA Status: ", dma_stats->ipa_tx_dma_status); - - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "TX DMA Status - TX DMA Underflow : ", - bit_status_string - [dma_stats->ipa_tx_dma_ch_underflow]); - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "TX DMA Status - TX DMA Transfer Stopped : ", - bit_status_string - [dma_stats->ipa_tx_dma_transfer_stopped]); - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "TX DMA Status - TX DMA Transfer Complete : ", - bit_status_string - [dma_stats->ipa_tx_dma_transfer_complete]); - len += scnprintf(buf + len, buf_len - len, "\n"); - - len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", - "TX DMA CH2 INT Mask: ", dma_stats->ipa_tx_int_mask); - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "TXDMACH2 INTMASK - Transfer Complete IRQ : ", - bit_mask_string - [dma_stats->ipa_tx_transfer_complete_irq]); - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "TXDMACH2 INTMASK - Transfer Stopped IRQ : ", - bit_mask_string - [dma_stats->ipa_tx_transfer_stopped_irq]); - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "TXDMACH2 INTMASK - Underflow IRQ : ", - bit_mask_string[dma_stats->ipa_tx_underflow_irq]); - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "TXDMACH2 INTMASK - Early Transmit Complete IRQ : ", - bit_mask_string - [dma_stats->ipa_tx_early_trans_cmp_irq]); - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "TXDMACH2 INTMASK - Fatal Bus Error IRQ : ", - bit_mask_string[dma_stats->ipa_tx_fatal_err_irq]); - len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", - "TXDMACH2 INTMASK - CNTX Desc Error IRQ : ", - bit_mask_string[dma_stats->ipa_tx_desc_err_irq]); + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (!eth_ipa_queue_type_enabled(type)) + continue; + + ethqos_ipa_stats_read(ethqos, type); + dma_stats = ð_ipa_ctx.ipa_stats[type]; + + len += scnprintf(buf + len, buf_len - len, "\n\n"); + len += scnprintf(buf + len, buf_len - len, "%25s\n", + "NTN DMA Stats"); + len += scnprintf(buf + len, buf_len - len, "%25s\n\n", + "=================================================="); + + len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", + "RX Desc Ring Base: ", + dma_stats->ipa_rx_desc_ring_base); + len += scnprintf(buf + len, buf_len - len, "%-50s %10d\n", + "RX Desc Ring Size: ", + dma_stats->ipa_rx_desc_ring_size); + len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", + "RX Buff Ring Base: ", + dma_stats->ipa_rx_buff_ring_base); + len += scnprintf(buf + len, buf_len - len, "%-50s %10d\n", + "RX Buff Ring Size: ", + dma_stats->ipa_rx_buff_ring_size); + len += scnprintf(buf + len, buf_len - len, "%-50s %10u\n", + "RX Doorbell Interrupts Raised: ", + dma_stats->ipa_rx_db_int_raised); + len += scnprintf(buf + len, buf_len - len, "%-50s %10d\n", + "RX Current Desc Pointer Index: ", + 
dma_stats->ipa_rx_cur_desc_ptr_indx); + len += scnprintf(buf + len, buf_len - len, "%-50s %10d\n", + "RX Tail Pointer Index: ", + dma_stats->ipa_rx_tail_ptr_indx); + len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", + "RX Doorbell Address: ", + eth_ipa->uc_db_rx_addr[type]); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", + "RX DMA Status: ", + dma_stats->ipa_rx_dma_status); + + len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", + "RX DMA Status - RX DMA Underflow : ", + bit_status_string + [dma_stats->ipa_rx_dma_ch_underflow]); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10s\n", + "RX DMA Status - RX DMA Stopped : ", + bit_status_string[dma_stats->ipa_rx_dma_ch_stopped]); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10s\n", + "RX DMA Status - RX DMA Complete : ", + bit_status_string[dma_stats->ipa_rx_dma_ch_complete]); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, "%-50s 0x%x\n", + "RX DMA CH0 INT Mask: ", + dma_stats->ipa_rx_int_mask); + len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", + "RXDMACH0 INTMASK - Transfer Complete IRQ : ", + bit_mask_string + [dma_stats->ipa_rx_transfer_complete_irq]); + len += scnprintf(buf + len, buf_len - len, "%-50s %10s\n", + "RXDMACH0 INTMASK - Transfer Stopped IRQ : ", + bit_mask_string + [dma_stats->ipa_rx_transfer_stopped_irq]); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10s\n", + "RXDMACH0 INTMASK - Underflow IRQ : ", + bit_mask_string[dma_stats->ipa_rx_underflow_irq]); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10s\n", + "RXDMACH0 INTMASK - Early Transmit Complete IRQ : ", + bit_mask_string + [dma_stats->ipa_rx_early_trans_comp_irq]); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf( + buf + len, buf_len - len, "%-50s 0x%x\n", + "TX Desc Ring Base: ", + dma_stats->ipa_tx_desc_ring_base); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10d\n", + "TX Desc Ring Size: ", + dma_stats->ipa_tx_desc_ring_size); + len += scnprintf( + buf + len, buf_len - len, "%-50s 0x%x\n", + "TX Buff Ring Base: ", + dma_stats->ipa_tx_buff_ring_base); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10d\n", + "TX Buff Ring Size: ", + dma_stats->ipa_tx_buff_ring_size); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10u\n", + "TX Doorbell Interrupts Raised: ", + dma_stats->ipa_tx_db_int_raised); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10lu\n", + "TX Current Desc Pointer Index: ", + dma_stats->ipa_tx_curr_desc_ptr_indx); + + len += scnprintf( + buf + len, buf_len - len, "%-50s %10lu\n", + "TX Tail Pointer Index: ", + dma_stats->ipa_tx_tail_ptr_indx); + len += scnprintf( + buf + len, buf_len - len, "%-50s 0x%x\n", + "TX Doorbell Address: ", eth_ipa->uc_db_tx_addr[type]); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf( + buf + len, buf_len - len, "%-50s 0x%x\n", + "TX DMA Status: ", dma_stats->ipa_tx_dma_status); + + len += scnprintf( + buf + len, buf_len - len, "%-50s %10s\n", + "TX DMA Status - TX DMA Underflow : ", + bit_status_string + [dma_stats->ipa_tx_dma_ch_underflow]); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10s\n", + "TX DMA Status - TX DMA Transfer Stopped : ", + bit_status_string + [dma_stats->ipa_tx_dma_transfer_stopped]); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10s\n", + "TX DMA Status - TX DMA Transfer Complete : ", + bit_status_string + 
[dma_stats->ipa_tx_dma_transfer_complete]); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf( + buf + len, buf_len - len, "%-50s 0x%x\n", + "TX DMA CH2 INT Mask: ", dma_stats->ipa_tx_int_mask); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10s\n", + "TXDMACH2 INTMASK - Transfer Complete IRQ : ", + bit_mask_string + [dma_stats->ipa_tx_transfer_complete_irq]); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10s\n", + "TXDMACH2 INTMASK - Transfer Stopped IRQ : ", + bit_mask_string + [dma_stats->ipa_tx_transfer_stopped_irq]); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10s\n", + "TXDMACH2 INTMASK - Underflow IRQ : ", + bit_mask_string[dma_stats->ipa_tx_underflow_irq]); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10s\n", + "TXDMACH2 INTMASK - Early Transmit Complete IRQ : ", + bit_mask_string + [dma_stats->ipa_tx_early_trans_cmp_irq]); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10s\n", + "TXDMACH2 INTMASK - Fatal Bus Error IRQ : ", + bit_mask_string[dma_stats->ipa_tx_fatal_err_irq]); + len += scnprintf( + buf + len, buf_len - len, "%-50s %10s\n", + "TXDMACH2 INTMASK - CNTX Desc Error IRQ : ", + bit_mask_string[dma_stats->ipa_tx_desc_err_irq]); + } if (len > buf_len) len = buf_len; @@ -1627,7 +2132,7 @@ static int ethqos_ipa_cleanup_debugfs(struct qcom_ethqos *ethqos) eth_ipa->debugfs_suspend_ipa_offload = NULL; } - ETHQOSDBG("IPA debugfs Deleted Successfully\n"); + ETHQOSERR("IPA debugfs Deleted Successfully\n"); return 0; } @@ -1682,7 +2187,8 @@ static int ethqos_ipa_create_debugfs(struct qcom_ethqos *ethqos) return -ENOMEM; } -static int ethqos_ipa_offload_connect(struct qcom_ethqos *ethqos) +static int ethqos_ipa_offload_connect( + struct qcom_ethqos *ethqos, enum ipa_queue_type type) { struct ethqos_prv_ipa_data *eth_ipa = ð_ipa_ctx; struct ipa_uc_offload_conn_in_params in; @@ -1701,16 +2207,22 @@ static int ethqos_ipa_offload_connect(struct qcom_ethqos *ethqos) } /* Configure interrupt route for ETHQOS TX DMA channel to IPA */ - RGMII_GPIO_CFG_TX_INT_UDFWR(IPA_DMA_TX_CH); + /* Currently, HW route is supported only for one DMA channel */ + if (eth_ipa_queue_type_to_tx_intr_route(type) == IPA_INTR_ROUTE_HW) + RGMII_GPIO_CFG_TX_INT_UDFWR( + eth_ipa_queue_type_to_tx_queue(type)); /* Configure interrupt route for ETHQOS RX DMA channel to IPA */ - RGMII_GPIO_CFG_RX_INT_UDFWR(IPA_DMA_RX_CH); + /* Currently, HW route is supported only for one DMA channel */ + if (eth_ipa_queue_type_to_rx_intr_route(type) == IPA_INTR_ROUTE_HW) + RGMII_GPIO_CFG_RX_INT_UDFWR( + eth_ipa_queue_type_to_rx_queue(type)); memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); memset(&profile, 0, sizeof(profile)); - in.clnt_hndl = eth_ipa->ipa_client_hndl; + in.clnt_hndl = eth_ipa->ipa_client_hndl[type]; /* Uplink Setup */ if (stmmac_emb_smmu_ctx.valid) @@ -1718,58 +2230,61 @@ static int ethqos_ipa_offload_connect(struct qcom_ethqos *ethqos) else rx_setup_info.smmu_enabled = false; - rx_setup_info.client = IPA_CLIENT_ETHERNET_PROD; + rx_setup_info.client = eth_ipa_queue_type_to_rx_client(type); + rx_setup_info.db_mode = eth_ipa_queue_type_to_rx_intr_route(type); if (!rx_setup_info.smmu_enabled) - rx_setup_info.ring_base_pa - = (phys_addr_t)eth_ipa_ctx.rx_queue->rx_desc_dma_addrs[0]; + rx_setup_info.ring_base_pa = + (phys_addr_t)eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs[0]; rx_setup_info.ring_base_iova - = eth_ipa_ctx.rx_queue->rx_desc_dma_addrs[0]; + = eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs[0]; rx_setup_info.ntn_ring_size - = 
eth_ipa_ctx.rx_queue->desc_cnt; + = eth_ipa_ctx.rx_queue[type]->desc_cnt; if (!rx_setup_info.smmu_enabled) - rx_setup_info.buff_pool_base_pa - = eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base_dmahndl; - rx_setup_info.buff_pool_base_iova - = eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base_dmahndl; - rx_setup_info.num_buffers - = eth_ipa_ctx.rx_queue[IPA_DMA_RX_CH].desc_cnt - 1; - rx_setup_info.data_buff_size = ETHQOS_ETH_FRAME_LEN_IPA; - - /* Base address here is the address of ETHQOS_DMA_CH0_CONTROL + rx_setup_info.buff_pool_base_pa = + eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base_dmahndl; + rx_setup_info.buff_pool_base_iova = + eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base_dmahndl; + rx_setup_info.num_buffers = + eth_ipa_ctx.rx_queue[type]->desc_cnt - 1; + rx_setup_info.data_buff_size = eth_ipa_queue_type_to_buf_length(type); + + /* Base address here is the address of ETHQOS_DMA_CH(i)_CONTROL * in ETHQOS resgister space */ - rx_setup_info.ntn_reg_base_ptr_pa = (phys_addr_t) - (((unsigned long)(DMA_CR0_RGOFFADDR - BASE_ADDRESS)) + - (unsigned long)ethqos->emac_mem_base); + rx_setup_info.ntn_reg_base_ptr_pa = + eth_ipa_queue_type_to_rx_reg_base_ptr_pa(type); + /* Downlink Setup */ if (stmmac_emb_smmu_ctx.valid) tx_setup_info.smmu_enabled = true; else tx_setup_info.smmu_enabled = false; - tx_setup_info.client = IPA_CLIENT_ETHERNET_CONS; + tx_setup_info.client = eth_ipa_queue_type_to_tx_client(type); + tx_setup_info.db_mode = eth_ipa_queue_type_to_tx_intr_route(type); if (!tx_setup_info.smmu_enabled) - tx_setup_info.ring_base_pa - = (phys_addr_t)eth_ipa_ctx.tx_queue->tx_desc_dma_addrs[0]; + tx_setup_info.ring_base_pa = + (phys_addr_t)eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs[0]; tx_setup_info.ring_base_iova - = eth_ipa_ctx.tx_queue->tx_desc_dma_addrs[0]; - tx_setup_info.ntn_ring_size = eth_ipa_ctx.tx_queue->desc_cnt; + = eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs[0]; + tx_setup_info.ntn_ring_size = eth_ipa_ctx.tx_queue[type]->desc_cnt; if (!tx_setup_info.smmu_enabled) - tx_setup_info.buff_pool_base_pa - = eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base_dmahndl; - tx_setup_info.buff_pool_base_iova - = eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base_dmahndl; - tx_setup_info.num_buffers - = eth_ipa_ctx.tx_queue->desc_cnt - 1; - tx_setup_info.data_buff_size = ETHQOS_ETH_FRAME_LEN_IPA; - - /* Base address here is the address of ETHQOS_DMA_CH0_CONTROL + tx_setup_info.buff_pool_base_pa = + eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base_dmahndl; + + tx_setup_info.buff_pool_base_iova = + eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base_dmahndl; + + tx_setup_info.num_buffers = + eth_ipa_ctx.tx_queue[type]->desc_cnt - 1; + tx_setup_info.data_buff_size = eth_ipa_queue_type_to_buf_length(type); + + /* Base address here is the address of ETHQOS_DMA_CH(i)_CONTROL * in ETHQOS resgister space */ - tx_setup_info.ntn_reg_base_ptr_pa = (phys_addr_t) - (((unsigned long)(DMA_CR0_RGOFFADDR - BASE_ADDRESS)) - + (unsigned long)ethqos->emac_mem_base); + tx_setup_info.ntn_reg_base_ptr_pa = + eth_ipa_queue_type_to_tx_reg_base_ptr_pa(type); rx_setup_info.data_buff_list = kcalloc(rx_setup_info.num_buffers, sizeof(struct ntn_buff_smmu_map), @@ -1789,30 +2304,32 @@ static int ethqos_ipa_offload_connect(struct qcom_ethqos *ethqos) } for (i = 0; i < rx_setup_info.num_buffers; i++) { - rx_setup_info.data_buff_list[i].iova - = eth_ipa_ctx.rx_queue->ipa_rx_buff_pool_pa_addrs_base[i]; + rx_setup_info.data_buff_list[i].iova = + eth_ipa_ctx.rx_queue[type]->ipa_rx_pa_addrs_base[i]; + if 
(!rx_setup_info.smmu_enabled) { - rx_setup_info.data_buff_list[i].pa - = rx_setup_info.data_buff_list[i].iova; + rx_setup_info.data_buff_list[i].pa = + rx_setup_info.data_buff_list[i].iova; } else { - rx_setup_info.data_buff_list[i].pa - = eth_ipa_ctx.rx_queue->ipa_rx_buff_phy_addr[i]; + rx_setup_info.data_buff_list[i].pa = + eth_ipa_ctx.rx_queue[type]->ipa_rx_buff_phy_addr[i]; } } for (i = 0; i < tx_setup_info.num_buffers; i++) { - tx_setup_info.data_buff_list[i].iova - = eth_ipa_ctx.tx_queue->ipa_tx_buff_pool_pa_addrs_base[i]; + tx_setup_info.data_buff_list[i].iova = + eth_ipa_ctx.tx_queue[type]->ipa_tx_pa_addrs_base[i]; + if (!tx_setup_info.smmu_enabled) - tx_setup_info.data_buff_list[i].pa - = tx_setup_info.data_buff_list[i].iova; + tx_setup_info.data_buff_list[i].pa = + tx_setup_info.data_buff_list[i].iova; else - tx_setup_info.data_buff_list[i].pa - = eth_ipa_ctx.tx_queue->ipa_tx_buff_phy_addr[i]; + tx_setup_info.data_buff_list[i].pa = + eth_ipa_ctx.tx_queue[type]->ipa_tx_phy_addr[i]; } if (stmmac_emb_smmu_ctx.valid) { ret = ethqos_set_ul_dl_smmu_ipa_params(ethqos, &rx_setup_info, - &tx_setup_info); + &tx_setup_info, type); if (ret) { ETHQOSERR("Failed to build ipa_setup_info :%d\n", ret); ret = -1; @@ -1820,7 +2337,9 @@ static int ethqos_ipa_offload_connect(struct qcom_ethqos *ethqos) } } + // IPA <- IPAUC <- EMAC <- CLIENT in.u.ntn.ul = rx_setup_info; + // IPA -> IPAUC -> EMAC -> CLIENT in.u.ntn.dl = tx_setup_info; ret = ipa_uc_offload_conn_pipes(&in, &out); @@ -1830,29 +2349,36 @@ static int ethqos_ipa_offload_connect(struct qcom_ethqos *ethqos) goto mem_free; } - eth_ipa->uc_db_rx_addr = out.u.ntn.ul_uc_db_pa; - eth_ipa->uc_db_tx_addr = out.u.ntn.dl_uc_db_pa; + eth_ipa->uc_db_rx_addr[type] = out.u.ntn.ul_uc_db_iomem; + eth_ipa->uc_db_tx_addr[type] = out.u.ntn.dl_uc_db_iomem; + + ETHQOSDBG("type=%d rx_db=%x\n", type, eth_ipa_ctx.uc_db_rx_addr[type]); + ETHQOSDBG("type=%d tx_db=%x\n", type, eth_ipa_ctx.uc_db_tx_addr[type]); /* Set Perf Profile For PROD/CONS Pipes */ profile.max_supported_bw_mbps = ethqos->speed; - profile.client = IPA_CLIENT_ETHERNET_PROD; + profile.client = eth_ipa_queue_type_to_rx_client(type); + profile.proto = eth_ipa_queue_type_to_proto(type); ret = ipa_set_perf_profile(&profile); if (ret) { - ETHQOSERR("Err to set BW: IPA_RM_RESOURCE_ETHERNET_PROD :%d\n", - ret); + ETHQOSERR("%s: Err set IPA_RM_RESOURCE_ETHERNET_PROD :%d\n", + __func__, ret); ret = -1; goto mem_free; } - profile.client = IPA_CLIENT_ETHERNET_CONS; + profile.client = eth_ipa_queue_type_to_tx_client(type); + profile.proto = eth_ipa_queue_type_to_proto(type); ret = ipa_set_perf_profile(&profile); if (ret) { - ETHQOSERR("Err to set BW: IPA_RM_RESOURCE_ETHERNET_CONS :%d\n", - ret); + ETHQOSERR("%s: Err set IPA_RM_RESOURCE_ETHERNET_CONS :%d\n", + __func__, ret); ret = -1; goto mem_free; } - eth_ipa_send_msg(ethqos, IPA_PERIPHERAL_CONNECT); + + if (eth_ipa_queue_type_to_send_msg_needed(type)) + eth_ipa_send_msg(ethqos, IPA_PERIPHERAL_CONNECT, type); mem_free: kfree(rx_setup_info.data_buff_list); rx_setup_info.data_buff_list = NULL; @@ -1884,10 +2410,11 @@ static int ethqos_ipa_offload_connect(struct qcom_ethqos *ethqos) } ETHQOSDBG("end\n"); - return 0; + return ret; } -static int ethqos_ipa_offload_disconnect(struct qcom_ethqos *ethqos) +static int ethqos_ipa_offload_disconnect( + struct qcom_ethqos *ethqos, enum ipa_queue_type type) { struct ethqos_prv_ipa_data *eth_ipa = ð_ipa_ctx; int ret = 0; @@ -1899,7 +2426,7 @@ static int ethqos_ipa_offload_disconnect(struct qcom_ethqos *ethqos) return 
-ENOMEM; } - ret = ipa_uc_offload_disconn_pipes(eth_ipa->ipa_client_hndl); + ret = ipa_uc_offload_disconn_pipes(eth_ipa->ipa_client_hndl[type]); if (ret) { ETHQOSERR("Could not cleanup IPA Offload ret %d\n", ret); return ret; @@ -1916,49 +2443,83 @@ static int ethqos_ipa_offload_suspend(struct qcom_ethqos *ethqos) struct platform_device *pdev = ethqos->pdev; struct net_device *dev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(dev); + int type; ETHQOSDBG("Suspend/disable IPA offload\n"); - priv->hw->dma->stop_rx(priv->ioaddr, IPA_DMA_RX_CH); - if (ret != 0) { - ETHQOSERR("stop_dma_rx failed %d\n", ret); - return ret; + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + priv->hw->dma->stop_rx( + priv->ioaddr, + eth_ipa_queue_type_to_rx_queue(type)); + + if (ret != 0) { + ETHQOSERR("%s: stop_dma_rx failed %d\n", + __func__, ret); + return ret; + } + } } /* Disconnect IPA offload */ if (eth_ipa_ctx.ipa_offload_conn) { - ret = ethqos_ipa_offload_disconnect(ethqos); - if (ret) { - ETHQOSERR("IPA Offload Disconnect Failed :%d\n", ret); - return ret; + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + ret = ethqos_ipa_offload_disconnect(ethqos, + type); + if (ret) { + ETHQOSERR("%s: Disconnect Failed %d\n", + __func__, ret); + return ret; + } + } + eth_ipa_ctx.ipa_offload_conn = false; } - eth_ipa_ctx.ipa_offload_conn = false; - ETHQOSDBG("IPA Offload Disconnect Successfully\n"); + ETHQOSERR("IPA Offload Disconnect Successfully\n"); } - priv->hw->dma->stop_tx(priv->ioaddr, IPA_DMA_TX_CH); + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + priv->hw->dma->stop_tx( + priv->ioaddr, + eth_ipa_queue_type_to_tx_queue(type)); - if (ret != 0) { - ETHQOSERR("stop_dma_tx failed %d\n", ret); - return ret; + if (ret != 0) { + ETHQOSERR("%s: stop_dma_tx failed %d\n", + __func__, ret); + return ret; + } + } } if (eth_ipa_ctx.ipa_uc_ready) { profile.max_supported_bw_mbps = 0; - profile.client = IPA_CLIENT_ETHERNET_CONS; - ret = ipa_set_perf_profile(&profile); - if (ret) - ETHQOSERR("Err set IPA_RM_RESOURCE_ETHERNET_CONS:%d\n", - ret); + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + profile.client = + eth_ipa_queue_type_to_tx_client(type); + profile.proto = + eth_ipa_queue_type_to_proto(type); + ret = ipa_set_perf_profile(&profile); + if (ret) + ETHQOSERR("%s: Err set BW for TX %d\n", + __func__, ret); + } + } } if (eth_ipa_ctx.ipa_offload_init) { - ret = ethqos_ipa_offload_cleanup(ethqos); - if (ret) { - ETHQOSERR("IPA Offload Cleanup Failed, err: %d\n", ret); - return ret; + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + ret = ethqos_ipa_offload_cleanup(ethqos, type); + if (ret) { + ETHQOSERR("%s: Cleanup Failed, %d\n", + __func__, ret); + return ret; + } + } } - ETHQOSDBG("IPA Offload Cleanup Success\n"); + ETHQOSINFO("IPA Offload Cleanup Success\n"); eth_ipa_ctx.ipa_offload_init = false; } @@ -1969,44 +2530,63 @@ static int ethqos_ipa_offload_resume(struct qcom_ethqos *ethqos) { int ret = 1; struct ipa_perf_profile profile; + int type; ETHQOSDBG("Enter\n"); if (!eth_ipa_ctx.ipa_offload_init) { - if (!ethqos_ipa_offload_init(ethqos)) { - eth_ipa_ctx.ipa_offload_init = true; - } else { - eth_ipa_ctx.ipa_offload_init = false; - ETHQOSERR("PA Offload Init Failed\n"); + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + 
eth_ipa_ctx.ipa_offload_init = + !ethqos_ipa_offload_init(ethqos, type); + if (!eth_ipa_ctx.ipa_offload_init) + ETHQOSERR("%s: Init Failed for %d\n", + __func__, type); + } } } /* Initialze descriptors before IPA connect */ /* Set IPA owned DMA channels to reset state */ - ethqos_ipa_tx_desc_init(ethqos, IPA_DMA_TX_CH); - ethqos_ipa_rx_desc_init(ethqos, IPA_DMA_RX_CH); + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + ethqos_ipa_tx_desc_init(ethqos, type); + ethqos_ipa_rx_desc_init(ethqos, type); + } + } - ETHQOSDBG("DWC_ETH_QOS_ipa_offload_connect\n"); - ret = ethqos_ipa_offload_connect(ethqos); - if (ret != 0) - goto fail; - else - eth_ipa_ctx.ipa_offload_conn = true; + ETHQOSERR("DWC_ETH_QOS_ipa_offload_connect\n"); + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + ret = ethqos_ipa_offload_connect(ethqos, type); + if (ret != 0) + goto fail; + else + eth_ipa_ctx.ipa_offload_conn = true; + } + } profile.max_supported_bw_mbps = ethqos->speed; - profile.client = IPA_CLIENT_ETHERNET_CONS; - ret = ipa_set_perf_profile(&profile); - if (ret) - ETHQOSERR("Err to set BW: IPA_RM_RESOURCE_ETHERNET_CONS :%d\n", - ret); - /*Initialize DMA CHs for offload*/ - ethqos_init_offload(ethqos); - if (ret) { - ETHQOSERR("Offload channel Init Failed\n"); - return ret; + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + profile.client = eth_ipa_queue_type_to_tx_client(type); + profile.proto = eth_ipa_queue_type_to_proto(type); + ret = ipa_set_perf_profile(&profile); + if (ret) + ETHQOSERR("%s: Err set BW for TX: %d\n", + __func__, ret); + + /*Initialize DMA CHs for offload*/ + ethqos_init_offload(ethqos, type); + if (ret) { + ETHQOSERR("Offload channel Init Failed\n"); + return ret; + } + } } ETHQOSDBG("Exit\n"); + fail: return ret; } @@ -2043,64 +2623,93 @@ static int ethqos_disable_ipa_offload(struct qcom_ethqos *ethqos) static int ethqos_enable_ipa_offload(struct qcom_ethqos *ethqos) { int ret = 0; + int type; if (!eth_ipa_ctx.ipa_offload_init) { - ret = ethqos_ipa_offload_init(ethqos); - if (ret) { - eth_ipa_ctx.ipa_offload_init = false; - ETHQOSERR("IPA Offload Init Failed\n"); - goto fail; + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + ret = ethqos_ipa_offload_init(ethqos, type); + if (ret) { + ETHQOSERR("%s: Init Failed\n", + __func__); + eth_ipa_ctx.ipa_offload_init = false; + goto fail; + } + } } - ETHQOSDBG("IPA Offload Initialized Successfully\n"); + ETHQOSINFO("IPA Offload Initialized Successfully\n"); eth_ipa_ctx.ipa_offload_init = true; } if (!eth_ipa_ctx.ipa_offload_conn && !eth_ipa_ctx.ipa_offload_susp) { - ret = ethqos_ipa_offload_connect(ethqos); - if (ret) { - ETHQOSERR("IPA Offload Connect Failed\n"); - eth_ipa_ctx.ipa_offload_conn = false; - goto fail; + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + ret = ethqos_ipa_offload_connect(ethqos, type); + if (ret) { + ETHQOSERR("Connect Failed, type %d\n", + type); + eth_ipa_ctx.ipa_offload_conn = false; + goto fail; + } + } } - ETHQOSDBG("IPA Offload Connect Successfully\n"); + ETHQOSINFO("IPA Offload Connect Successfully\n"); eth_ipa_ctx.ipa_offload_conn = true; - /*Initialize DMA CHs for offload*/ - ret = ethqos_init_offload(ethqos); - if (ret) { - ETHQOSERR("Offload channel Init Failed\n"); - goto fail; + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + /*Initialize DMA CHs for offload*/ 
+ ret = ethqos_init_offload(ethqos, type); + if (ret) { + ETHQOSERR("%s: channel Init Failed\n", + __func__); + goto fail; + } + } + } } if (!eth_ipa_ctx.ipa_debugfs_exists) { if (!ethqos_ipa_create_debugfs(ethqos)) { - ETHQOSDBG("eMAC Debugfs created\n"); + ETHQOSERR("eMAC Debugfs created\n"); eth_ipa_ctx.ipa_debugfs_exists = true; } else { ETHQOSERR("eMAC Debugfs failed\n"); } } - ETHQOSDBG("IPA Offload Enabled successfully\n"); + ETHQOSINFO("IPA Offload Enabled successfully\n"); return ret; fail: if (eth_ipa_ctx.ipa_offload_conn) { - if (ethqos_ipa_offload_disconnect(ethqos)) - ETHQOSERR("IPA Offload Disconnect Failed\n"); - else - ETHQOSDBG("IPA Offload Disconnect Successfully\n"); - eth_ipa_ctx.ipa_offload_conn = false; + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + if (ethqos_ipa_offload_disconnect(ethqos, + type)) + ETHQOSERR( + "IPA Offload Disconnect Failed\n"); + else + ETHQOSERR( + "IPA Offload Disconnect Successfully\n"); + } + eth_ipa_ctx.ipa_offload_conn = false; + } } if (eth_ipa_ctx.ipa_offload_init) { - if (ethqos_ipa_offload_cleanup(ethqos)) - ETHQOSERR("IPA Offload Cleanup Failed\n"); - else - ETHQOSDBG("IPA Offload Cleanup Success\n"); - eth_ipa_ctx.ipa_offload_init = false; + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + if (ethqos_ipa_offload_cleanup(ethqos, type)) + ETHQOSERR( + "IPA Offload Cleanup Failed\n"); + else + ETHQOSERR( + "IPA Offload Cleanup Success\n"); + } + eth_ipa_ctx.ipa_offload_init = false; + } } return ret; @@ -2168,7 +2777,7 @@ static void ethqos_ipa_uc_ready_cb(void *user_data) return; } - ETHQOSDBG("Received IPA UC ready callback\n"); + ETHQOSINFO("Received IPA UC ready callback\n"); INIT_WORK(&eth_ipa->ntn_ipa_rdy_work, ethqos_ipaucrdy_wq); queue_work(system_unbound_wq, &eth_ipa->ntn_ipa_rdy_work); } @@ -2176,18 +2785,26 @@ static void ethqos_ipa_uc_ready_cb(void *user_data) static int ethqos_ipa_uc_ready(struct qcom_ethqos *pdata) { struct ipa_uc_ready_params param; - int ret; + int ret, type; ETHQOSDBG("Enter\n"); param.is_uC_ready = false; param.priv = pdata; param.notify = ethqos_ipa_uc_ready_cb; - param.proto = IPA_UC_NTN; + + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + // Register only for one enabled proto. 
+ // Do not need for all protos + param.proto = eth_ipa_queue_type_to_proto(type); + break; + } + } ret = ipa_uc_offload_reg_rdyCB(&param); if (ret == 0 && param.is_uC_ready) { - ETHQOSDBG("ipa uc ready\n"); + ETHQOSINFO("ipa uc ready\n"); eth_ipa_ctx.ipa_uc_ready = true; } ETHQOSDBG("Exit\n"); @@ -2197,191 +2814,212 @@ static int ethqos_ipa_uc_ready(struct qcom_ethqos *pdata) void ethqos_ipa_offload_event_handler(void *data, int ev) { - int ret; + int type; + ETHQOSDBG("Enter: event=%d\n", ev); - if (ev == EV_PROBE_INIT) { - eth_ipa_ctx.ethqos = data; - mutex_init(&eth_ipa_ctx.ipa_lock); - ret = - of_property_read_u32(eth_ipa_ctx.ethqos->pdev->dev.of_node, - "ipa-dma-rx-desc-cnt", - &eth_ipa_ctx.ipa_dma_rx_desc_cnt); - if (ret) { - ETHQOSDBG(":resource ipa-dma-rx-desc-cnt not in dt\n"); - eth_ipa_ctx.ipa_dma_rx_desc_cnt = IPA_RX_DESC_CNT; + /* Handle events outside IPA lock */ + switch (ev) { + case EV_PROBE_INIT: + { + eth_ipa_ctx.ethqos = data; + eth_ipa_ctx_init(); + eth_ipa_net_drv_init(); + return; } - - ret = - of_property_read_u32(eth_ipa_ctx.ethqos->pdev->dev.of_node, - "ipa-dma-tx-desc-cnt", - &eth_ipa_ctx.ipa_dma_tx_desc_cnt); - if (ret) { - ETHQOSDBG(":resource ipa-dma-tx-desc-cnt not in dt\n"); - eth_ipa_ctx.ipa_dma_tx_desc_cnt = IPA_TX_DESC_CNT; + break; + case EV_IPA_HANDLE_RX_INTR: + { + eth_ipa_handle_rx_interrupt(*(int *)data); + return; } - return; + break; + case EV_IPA_HANDLE_TX_INTR: + { + eth_ipa_handle_tx_interrupt(*(int *)data); + return; + } + break; + default: + break; } IPA_LOCK(); switch (ev) { case EV_PHY_LINK_DOWN: - { - if (!eth_ipa_ctx.emac_dev_ready || - !eth_ipa_ctx.ipa_uc_ready || - eth_ipa_ctx.ipa_offload_link_down || - eth_ipa_ctx.ipa_offload_susp || - !eth_ipa_ctx.ipa_offload_conn) - break; + if (!eth_ipa_ctx.emac_dev_ready || + !eth_ipa_ctx.ipa_uc_ready || + eth_ipa_ctx.ipa_offload_link_down || + eth_ipa_ctx.ipa_offload_susp || + !eth_ipa_ctx.ipa_offload_conn) + break; + + if (!ethqos_ipa_offload_suspend(eth_ipa_ctx.ethqos)) + eth_ipa_ctx.ipa_offload_link_down = true; - if (!ethqos_ipa_offload_suspend(eth_ipa_ctx.ethqos)) - eth_ipa_ctx.ipa_offload_link_down = true; - } break; case EV_PHY_LINK_UP: - { - if (!eth_ipa_ctx.emac_dev_ready || - !eth_ipa_ctx.ipa_uc_ready || - eth_ipa_ctx.ipa_offload_susp) - break; - - /* Link up event is expected only after link down */ - if (eth_ipa_ctx.ipa_offload_link_down) { - ethqos_ipa_offload_resume(eth_ipa_ctx.ethqos); - } else if (eth_ipa_ctx.emac_dev_ready && - eth_ipa_ctx.ipa_uc_ready) { - ethqos_enable_ipa_offload(eth_ipa_ctx.ethqos); - } + if (!eth_ipa_ctx.emac_dev_ready || + !eth_ipa_ctx.ipa_uc_ready || + eth_ipa_ctx.ipa_offload_susp) + break; + + /* Link up event is expected only after link down */ + if (eth_ipa_ctx.ipa_offload_link_down) + ethqos_ipa_offload_resume(eth_ipa_ctx.ethqos); + else if (eth_ipa_ctx.emac_dev_ready && + eth_ipa_ctx.ipa_uc_ready) + ethqos_enable_ipa_offload(eth_ipa_ctx.ethqos); + + eth_ipa_ctx.ipa_offload_link_down = false; - eth_ipa_ctx.ipa_offload_link_down = false; - } break; case EV_DEV_OPEN: - { - ethqos_ipa_config_queues(eth_ipa_ctx.ethqos); - eth_ipa_ctx.emac_dev_ready = true; + ethqos_ipa_config_queues(eth_ipa_ctx.ethqos); - if (!eth_ipa_ctx.ipa_ready) - ethqos_ipa_ready(eth_ipa_ctx.ethqos); + eth_ipa_ctx.emac_dev_ready = true; + + if (!eth_ipa_ctx.ipa_ready) + ethqos_ipa_ready(eth_ipa_ctx.ethqos); + + if (!eth_ipa_ctx.ipa_uc_ready) + ethqos_ipa_uc_ready(eth_ipa_ctx.ethqos); - if (!eth_ipa_ctx.ipa_uc_ready) - ethqos_ipa_uc_ready(eth_ipa_ctx.ethqos); - } break; case EV_IPA_READY: - { - 
eth_ipa_ctx.ipa_ready = true; + eth_ipa_ctx.ipa_ready = true; - if (!eth_ipa_ctx.ipa_uc_ready) - ethqos_ipa_uc_ready(eth_ipa_ctx.ethqos); + if (!eth_ipa_ctx.ipa_uc_ready) + ethqos_ipa_uc_ready(eth_ipa_ctx.ethqos); + + if (eth_ipa_ctx.ipa_uc_ready && + qcom_ethqos_is_phy_link_up(eth_ipa_ctx.ethqos)) + ethqos_enable_ipa_offload(eth_ipa_ctx.ethqos); - if (eth_ipa_ctx.ipa_uc_ready && - qcom_ethqos_is_phy_link_up(eth_ipa_ctx.ethqos)) - ethqos_enable_ipa_offload(eth_ipa_ctx.ethqos); - } break; case EV_IPA_UC_READY: - { - eth_ipa_ctx.ipa_uc_ready = true; - ETHQOSDBG("ipa uC is ready\n"); - - if (!eth_ipa_ctx.emac_dev_ready) - break; - if (eth_ipa_ctx.ipa_ready) { - if (!eth_ipa_ctx.ipa_offload_init) { - if (!ethqos_ipa_offload_init( - eth_ipa_ctx.ethqos)) - eth_ipa_ctx.ipa_offload_init - = true; - } + eth_ipa_ctx.ipa_uc_ready = true; + ETHQOSINFO("ipa uC is ready\n"); + + if (!eth_ipa_ctx.emac_dev_ready) + break; + if (eth_ipa_ctx.ipa_ready && !eth_ipa_ctx.ipa_offload_init) { + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + eth_ipa_ctx.ipa_offload_init = + !ethqos_ipa_offload_init( + eth_ipa_ctx.ethqos, + type); } - if (qcom_ethqos_is_phy_link_up(eth_ipa_ctx.ethqos)) - ethqos_enable_ipa_offload(eth_ipa_ctx.ethqos); + } } + if (qcom_ethqos_is_phy_link_up(eth_ipa_ctx.ethqos)) + ethqos_enable_ipa_offload(eth_ipa_ctx.ethqos); + break; case EV_DEV_CLOSE: - { - eth_ipa_ctx.emac_dev_ready = false; - - if (eth_ipa_ctx.ipa_uc_ready) - ipa_uc_offload_dereg_rdyCB(IPA_UC_NTN); + eth_ipa_ctx.emac_dev_ready = false; + + for (type = 0; type < IPA_QUEUE_MAX; type++) { + if (eth_ipa_queue_type_enabled(type)) { + /* Deregister only for 1st enabled proto */ + if (eth_ipa_ctx.ipa_uc_ready) { + ipa_uc_offload_dereg_rdyCB( + eth_ipa_queue_type_to_proto( + type)); + break; + } + } + } - ethqos_disable_ipa_offload(eth_ipa_ctx.ethqos); + ethqos_disable_ipa_offload(eth_ipa_ctx.ethqos); - /* reset link down on dev close */ - eth_ipa_ctx.ipa_offload_link_down = 0; - ethqos_free_ipa_queue_mem(eth_ipa_ctx.ethqos); + /* reset link down on dev close */ + eth_ipa_ctx.ipa_offload_link_down = 0; + ethqos_free_ipa_queue_mem(eth_ipa_ctx.ethqos); - } break; case EV_DPM_SUSPEND: - { - if (eth_ipa_ctx.ipa_offload_conn) - *(int *)data = false; - else - *(int *)data = true; - } + if (eth_ipa_ctx.ipa_offload_conn) + *(int *)data = false; + else + *(int *)data = true; + break; case EV_USR_SUSPEND: - { - if (!eth_ipa_ctx.ipa_offload_susp && - !eth_ipa_ctx.ipa_offload_link_down) - if (!ethqos_ipa_offload_suspend( - eth_ipa_ctx.ethqos)) - eth_ipa_ctx.ipa_offload_susp - = true; - } + if (!eth_ipa_ctx.ipa_offload_susp && + !eth_ipa_ctx.ipa_offload_link_down) + if (!ethqos_ipa_offload_suspend(eth_ipa_ctx.ethqos)) + eth_ipa_ctx.ipa_offload_susp = true; + break; case EV_DPM_RESUME: - { - if (qcom_ethqos_is_phy_link_up(eth_ipa_ctx.ethqos)) { - if (!ethqos_ipa_offload_resume( - eth_ipa_ctx.ethqos)) - eth_ipa_ctx.ipa_offload_susp - = false; - } else { - /* Reset flag here to allow connection - * of pipes on next PHY link up - */ - eth_ipa_ctx.ipa_offload_susp - = false; - /* PHY link is down at resume */ - /* Reset flag here to allow connection - * of pipes on next PHY link up - */ - eth_ipa_ctx.ipa_offload_link_down - = true; - } - } + if (qcom_ethqos_is_phy_link_up(eth_ipa_ctx.ethqos)) { + if (!ethqos_ipa_offload_resume(eth_ipa_ctx.ethqos)) + eth_ipa_ctx.ipa_offload_susp = false; + } else { + /* Reset flag here to allow connection + * of pipes on next PHY link up + */ + eth_ipa_ctx.ipa_offload_susp = 
false; + /* PHY link is down at resume */ + /* Reset flag here to allow connection + * of pipes on next PHY link up + */ + eth_ipa_ctx.ipa_offload_link_down = true; + } + break; case EV_USR_RESUME: - { - if (eth_ipa_ctx.ipa_offload_susp) { - if (!ethqos_ipa_offload_resume( - eth_ipa_ctx.ethqos)) - eth_ipa_ctx.ipa_offload_susp - = false; - } - } + if (eth_ipa_ctx.ipa_offload_susp) + if (!ethqos_ipa_offload_resume(eth_ipa_ctx.ethqos)) + eth_ipa_ctx.ipa_offload_susp = false; + break; case EV_IPA_OFFLOAD_REMOVE: - { - ethqos_rx_buf_free_mem(eth_ipa_ctx.ethqos, IPA_DMA_RX_CH); - ethqos_tx_buf_free_mem(eth_ipa_ctx.ethqos, IPA_DMA_TX_CH); - ethqos_rx_desc_free_mem(eth_ipa_ctx.ethqos, IPA_DMA_RX_CH); - ethqos_tx_desc_free_mem(eth_ipa_ctx.ethqos, IPA_DMA_TX_CH); - ethqos_free_ipa_rx_queue_struct(eth_ipa_ctx.ethqos); - ethqos_free_ipa_tx_queue_struct(eth_ipa_ctx.ethqos); + ethqos_free_ipa_queue_mem(eth_ipa_ctx.ethqos); + + break; + case EV_QTI_GET_CONN_STATUS: + if (eth_ipa_ctx.queue_enabled[IPA_QUEUE_CV2X]) + *(u8 *)data = eth_ipa_ctx.ipa_offload_conn ? + ETH_EVT_CV2X_PIPE_CONNECTED : + ETH_EVT_CV2X_PIPE_DISCONNECTED; + else + *(u8 *)data = ETH_EVT_CV2X_MODE_NOT_ENABLED; + + break; + case EV_QTI_CHECK_CONN_UPDATE: + /* check if status is updated */ + if (eth_ipa_ctx.ipa_offload_conn_prev != + eth_ipa_ctx.ipa_offload_conn) { + *(int *)data = true; + eth_ipa_ctx.ipa_offload_conn_prev = + eth_ipa_ctx.ipa_offload_conn; + } else { + *(int *)data = false; } + break; case EV_INVALID: default: - { - } break; } + /* Wake up the /dev/emac queue if there is connect status changed */ + /* This should be done only if CV2X queue is enabled */ + /* Do only for event which can actually alter pipe connection */ + if (eth_ipa_ctx.queue_enabled[IPA_QUEUE_CV2X] && + (ev == EV_USR_SUSPEND || ev == EV_USR_RESUME || + ev == EV_DEV_CLOSE || ev == EV_DEV_OPEN || + ev == EV_PHY_LINK_DOWN || ev == EV_PHY_LINK_UP)) { + if (eth_ipa_ctx.ipa_offload_conn_prev != + eth_ipa_ctx.ipa_offload_conn) + ETHQOSDBG("need-status-updated\n"); + ethqos_wakeup_dev_emac_queue(); + } + ETHQOSDBG("Exit: event=%d\n", ev); IPA_UNLOCK(); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ipa.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ipa.h index ee32f4e2dbfe27228c73289db55ad72816ffd696..17c9a86d36a849f4ed53f4bcc9a98f4549b61833 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ipa.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ipa.h @@ -20,22 +20,6 @@ #define IPA_LOCK() mutex_lock(&eth_ipa_ctx.ipa_lock) #define IPA_UNLOCK() mutex_unlock(&eth_ipa_ctx.ipa_lock) -static char * const IPA_OFFLOAD_EVENT_string[] = { - "EV_INVALID", - "EV_DEV_OPEN", - "EV_DEV_CLOSE", - "EV_IPA_READY", - "EV_IPA_UC_READY", - "EV_PHY_LINK_UP", - "EV_PHY_LINK_DOWN", - "EV_DPM_SUSPEND", - "EV_DPM_RESUME", - "EV_USR_SUSPEND", - "EV_USR_RESUME", - "EV_IPA_OFFLOAD_MAX" -}; - - #define GET_VALUE(data, lbit, lbit2, hbit) ((data >> lbit) & \ (~(~0 << (hbit - lbit2 + 1)))) @@ -64,22 +48,29 @@ static char * const IPA_OFFLOAD_EVENT_string[] = { #define GET_RX_CURRENT_RCVD_LAST_DESC_INDEX(start_index, offset, desc_cnt)\ (desc_cnt - 1) -#define GET_RX_DESC_IDX(QINX, desc)\ - (((desc) - eth_ipa_ctx.rx_queue->rx_desc_dma_addrs[0]) / \ +#define GET_RX_DESC_IDX(type, desc)\ + (((desc) - eth_ipa_ctx.rx_queue[type]->rx_desc_dma_addrs[0]) / \ (sizeof(struct dma_desc))) -#define GET_TX_DESC_IDX(QINX, desc)\ - (((desc) - 
eth_ipa_ctx.tx_queue[type]->tx_desc_dma_addrs[0]) / \ (sizeof(struct dma_desc))) #define DMA_CR0_RGOFFADDR ((BASE_ADDRESS + 0x1100)) +#define DMA_CR3_RGOFFADDR ((BASE_ADDRESS + 0x1280)) +#define DMA_CR4_RGOFFADDR ((BASE_ADDRESS + 0x1300)) -#define ETHQOS_ETH_FRAME_LEN_IPA ((1 << 11)) /*IPA can support 2KB max length*/ +/* IPA can support 2KB max length */ +#define ETHQOS_ETH_FRAME_LEN_IPA_BE ((1 << 11)) +#define ETHQOS_ETH_FRAME_LEN_IPA_CV2X ((1 << 11)) -#define IPA_TX_DESC_CNT 128 /*Default TX desc count to 128 for IPA offload*/ -#define IPA_RX_DESC_CNT 128 /*Default RX desc count to 128 for IPA offload*/ +/* Default desc count */ +#define IPA_TX_DESC_CNT_BE 128 +#define IPA_RX_DESC_CNT_BE 128 +#define IPA_TX_DESC_CNT_CV2X 128 +#define IPA_RX_DESC_CNT_CV2X 128 -#define BASE_ADDRESS (ethqos->ioaddr) +#define BASE_ADDRESS (eth_ipa_ctx.ethqos->ioaddr) #define DMA_TDRLR_RGOFFADDR (BASE_ADDRESS + 0x112c) @@ -460,6 +451,8 @@ static char * const IPA_OFFLOAD_EVENT_string[] = { #define DMA_DSR0_RGRD(data) \ ((data) = readl_relaxed(DMA_DSR0_RGOFFADDR)) +#define DMA_DSR1_RGOFFADDR ((BASE_ADDRESS + 0x1010)) + #define DMA_CHRDR_RGOFFADDR (BASE_ADDRESS + 0x114c) #define DMA_CHRDR_RGOFFADDRESS(i)\ @@ -503,6 +496,21 @@ static char * const IPA_OFFLOAD_EVENT_string[] = { #define DMA_CHTDR_RGRD(i, data) \ ((data) = readl_relaxed(DMA_CHTDR_RGOFFADDRESS(i))) +#define DMA_CHTBAR_RGOFFADDR (BASE_ADDRESS + 0x1154) + +#define DMA_CHTBAR_RGOFFADDRESS(i)\ + ((DMA_CHTBAR_RGOFFADDR + ((i - 0) * 128))) + +#define DMA_CHRBAR_RGOFFADDR (BASE_ADDRESS + 0x115c) + +#define DMA_CHRBAR_RGOFFADDRESS(i)\ + ((DMA_CHRBAR_RGOFFADDR + ((i - 0) * 128))) + +#define DMA_CH_MISS_FRAME_CNT_RGOFFADDR (BASE_ADDRESS + 0x1164) + +#define DMA_CH_MISS_FRAME_CNT_RGOFFADDRESS(i)\ + ((DMA_CH_MISS_FRAME_CNT_RGOFFADDR + ((i - 0) * 128))) + #define DMA_TDTP_TPDR_RGOFFADDR (BASE_ADDRESS + 0x1120) #define DMA_TDTP_TPDR_RGOFFADDRESS(i)\ @@ -554,6 +562,8 @@ static char * const IPA_OFFLOAD_EVENT_string[] = { data = ((data1 >> 13) & DMA_IER_CDEE_MASK);\ } while (0) +#define DMA_ISR_RGOFFADDR ((BASE_ADDRESS + 0x1008)) + struct ethqos_tx_queue { struct stmmac_tx_queue *tx_q; unsigned int desc_cnt; @@ -562,13 +572,13 @@ struct ethqos_tx_queue { void **ipa_tx_buff_pool_va_addrs_base; - dma_addr_t *ipa_tx_buff_pool_pa_addrs_base; - dma_addr_t ipa_tx_buff_pool_pa_addrs_base_dmahndl; + dma_addr_t *ipa_tx_pa_addrs_base; + dma_addr_t ipa_tx_pa_addrs_base_dmahndl; dma_addr_t *skb_dma; /* dma address of skb */ struct sk_buff **skb; /* virtual address of skb */ unsigned short *len; /* length of first skb */ - phys_addr_t *ipa_tx_buff_phy_addr; /* physical address of ipa TX buff */ + phys_addr_t *ipa_tx_phy_addr; /* physical address of ipa TX buff */ }; struct ethqos_rx_queue { @@ -580,8 +590,8 @@ struct ethqos_rx_queue { void **ipa_rx_buff_pool_va_addrs_base; - dma_addr_t *ipa_rx_buff_pool_pa_addrs_base; - dma_addr_t ipa_rx_buff_pool_pa_addrs_base_dmahndl; + dma_addr_t *ipa_rx_pa_addrs_base; + dma_addr_t ipa_rx_pa_addrs_base_dmahndl; dma_addr_t *skb_dma; /* dma address of skb */ struct sk_buff **skb; /* virtual address of skb */ @@ -637,15 +647,60 @@ struct ethqos_ipa_stats { }; struct ethqos_prv_ipa_data { - struct ethqos_tx_queue *tx_queue; - struct ethqos_rx_queue *rx_queue; + bool queue_enabled[IPA_QUEUE_MAX]; + struct ethqos_tx_queue *tx_queue[IPA_QUEUE_MAX]; + struct ethqos_rx_queue *rx_queue[IPA_QUEUE_MAX]; + + void __iomem *uc_db_rx_addr[IPA_QUEUE_MAX]; + void __iomem *uc_db_tx_addr[IPA_QUEUE_MAX]; + u32 ipa_client_hndl[IPA_QUEUE_MAX]; + + /* desc count */ 
+ u32 ipa_dma_tx_desc_cnt[IPA_QUEUE_MAX]; + u32 ipa_dma_rx_desc_cnt[IPA_QUEUE_MAX]; + + /* intr moderation count only for RX */ + /* TX is taken care by IPA */ + u32 rx_intr_mod_cnt[IPA_QUEUE_MAX]; + + /* interrupt routing mode */ + enum ipa_intr_route_type tx_intr_route_mode[IPA_QUEUE_MAX]; + enum ipa_intr_route_type rx_intr_route_mode[IPA_QUEUE_MAX]; + + /* queue/chan number*/ + u8 tx_queue_num[IPA_QUEUE_MAX]; + u8 rx_queue_num[IPA_QUEUE_MAX]; + + /* buffer lens */ + u32 buf_len[IPA_QUEUE_MAX]; + + /* ipa cb for rx exception packets */ + ipa_notify_cb ipa_notify_cb[IPA_QUEUE_MAX]; + + /* IPA protocol */ + u32 ipa_proto[IPA_QUEUE_MAX]; + + /* IPA client enums prod/cons */ + u32 tx_client[IPA_QUEUE_MAX]; + u32 rx_client[IPA_QUEUE_MAX]; + + /* rx channel reg base ptr */ + phys_addr_t rx_reg_base_ptr_pa[IPA_QUEUE_MAX]; + + /* tx channel reg base ptr */ + phys_addr_t tx_reg_base_ptr_pa[IPA_QUEUE_MAX]; + + /* set if ipa_send_message is needed for a queue type */ + bool need_send_msg[IPA_QUEUE_MAX]; + + /* network device name*/ + char netdev_name[IPA_QUEUE_MAX][ETH_DEV_NAME_LEN]; - phys_addr_t uc_db_rx_addr; - phys_addr_t uc_db_tx_addr; - u32 ipa_client_hndl; + /* network device index */ + u8 netdev_index[IPA_QUEUE_MAX]; - u32 ipa_dma_tx_desc_cnt; - u32 ipa_dma_rx_desc_cnt; + /* network device addr */ + u8 netdev_addr[IPA_QUEUE_MAX][ETH_ALEN]; /* IPA state variables */ /* State of EMAC HW initialization */ @@ -658,6 +713,8 @@ struct ethqos_prv_ipa_data { bool ipa_offload_init; /* State of IPA pipes connection */ bool ipa_offload_conn; + /* State of IPA pipes connection previously */ + bool ipa_offload_conn_prev; /* State of debugfs creation */ bool ipa_debugfs_exists; /* State of IPA offload suspended by user */ @@ -676,9 +733,13 @@ struct ethqos_prv_ipa_data { struct dentry *debugfs_ipa_stats; struct dentry *debugfs_dma_stats; struct dentry *debugfs_suspend_ipa_offload; - struct ethqos_ipa_stats ipa_stats; + struct ethqos_ipa_stats ipa_stats[IPA_QUEUE_MAX]; struct qcom_ethqos *ethqos; }; +static void ntn_ipa_notify_cb_be( + void *priv, enum ipa_dp_evt_type evt, unsigned long data); +static void ntn_ipa_notify_cb_cv2x( + void *priv, enum ipa_dp_evt_type evt, unsigned long data); #endif diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-pps.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-pps.c index 4cc6b09d1a1b0ec03c739ca7a918825fbf1e2bdc..308a5bd7757d5aea0e302c59580d0e982d97fce3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-pps.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-pps.c @@ -40,18 +40,15 @@ static DECLARE_WAIT_QUEUE_HEAD(avb_class_b_msg_wq); static int strlcmp(const char *s, const char *t, size_t n) { - int ret; - while (n-- && *t != '\0') { if (*s != *t) { - ret = ((unsigned char)*s - (unsigned char)*t); + return ((unsigned char)*s - (unsigned char)*t); n = 0; } else { ++s, ++t; - ret = (unsigned char)*s; } } - return ret; + return (unsigned char)*s; } static void align_target_time_reg(u32 ch, void __iomem *ioaddr, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 1c0cb71865ee421dc29b160508088a036d034419..4d95dbc8d734c8df32f535cd77c658bee3a10263 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -159,6 +159,7 @@ struct stmmac_priv { struct dentry *dbgfs_rings_status; struct dentry *dbgfs_dma_cap; #endif + bool hw_offload_enabled; }; struct stmmac_emb_smmu_cb_ctx { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 2e87195626e419a6bef667a9f2a39f8e79797887..c6a8c23f461d208e01591ee86214eb2e19a974e7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -296,6 +296,12 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) { if (likely(priv->plat->fix_mac_speed)) { + if (priv->plat->mac2mac_en) { + priv->plat->fix_mac_speed(priv->plat->bsp_priv, + priv->speed); + return; + } + if (priv->phydev->link) priv->plat->fix_mac_speed(priv->plat->bsp_priv, priv->speed); @@ -873,11 +879,11 @@ static void stmmac_adjust_link(struct net_device *dev) mutex_unlock(&priv->lock); if (new_state) { - if (phydev->link == 1 && priv->tx_queue[IPA_DMA_TX_CH].skip_sw) + if (phydev->link == 1 && priv->hw_offload_enabled) ethqos_ipa_offload_event_handler(priv, EV_PHY_LINK_UP); else if (phydev->link == 0 && - priv->tx_queue[IPA_DMA_TX_CH].skip_sw) + priv->hw_offload_enabled) ethqos_ipa_offload_event_handler(priv, EV_PHY_LINK_DOWN); } @@ -1076,13 +1082,13 @@ static int stmmac_init_phy(struct net_device *dev) } if (phy_intr_en) { + phydev->irq = PHY_IGNORE_INTERRUPT; + phydev->interrupts = PHY_INTERRUPT_ENABLED; if (phydev->drv->config_intr && !phydev->drv->config_intr(phydev)) { pr_debug(" qcom-ethqos: %s config_phy_intr successful\n", __func__); qcom_ethqos_request_phy_wol(priv->plat); - phydev->irq = PHY_IGNORE_INTERRUPT; - phydev->interrupts = PHY_INTERRUPT_ENABLED; } else { pr_alert("Unable to register PHY IRQ\n"); phydev->irq = PHY_POLL; @@ -2174,6 +2180,13 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats, chan); + if (priv->rx_queue[chan].skip_sw && (status & handle_rx)) + ethqos_ipa_offload_event_handler( + &chan, EV_IPA_HANDLE_RX_INTR); + if (priv->tx_queue[chan].skip_sw && (status & handle_tx)) + ethqos_ipa_offload_event_handler( + &chan, EV_IPA_HANDLE_TX_INTR); + if ((likely((status & handle_rx)) || (status & handle_tx)) && !rx_q->skip_sw) { if (likely(napi_schedule_prep(&rx_q->napi))) { @@ -2835,7 +2848,7 @@ static int stmmac_open(struct net_device *dev) stmmac_enable_all_queues(priv); stmmac_start_all_queues(priv); - if (priv->tx_queue[IPA_DMA_TX_CH].skip_sw) + if (priv->hw_offload_enabled) ethqos_ipa_offload_event_handler(priv, EV_DEV_OPEN); if (priv->plat->mac2mac_en) { @@ -2918,7 +2931,7 @@ static int stmmac_release(struct net_device *dev) /* Release and free the Rx/Tx resources */ free_dma_desc_resources(priv); - if (priv->tx_queue[IPA_DMA_TX_CH].skip_sw) + if (priv->hw_offload_enabled) ethqos_ipa_offload_event_handler(priv, EV_DEV_CLOSE); /* Disable the MAC Rx/Tx */ diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 382993c1561c5c9b071138d0f8ca0def10743e83..7e5c0f182770d774c935cfd2b257fd66b4f3f294 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -4983,7 +4983,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) cas_cacheline_size)) { dev_err(&pdev->dev, "Could not set PCI cache " "line size\n"); - goto err_write_cacheline; + goto err_out_free_res; } } #endif @@ -5158,7 +5158,6 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err_out_free_res: pci_release_regions(pdev); -err_write_cacheline: /* Try to restore it in case the error occurred after we * set it. 
*/ diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 55c4b295ed0ef9955dab944042b1ad002069ca0d..3c9f8770f7e782c7b23f141ed97592f725427573 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -796,7 +796,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, if (dst) return dst; } - if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) { + dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6, + NULL); + if (IS_ERR(dst)) { netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr); return ERR_PTR(-ENETUNREACH); } @@ -913,9 +915,10 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) if (geneve->collect_md) { info = skb_tunnel_info(skb); if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { - err = -EINVAL; netdev_dbg(dev, "no tunnel metadata\n"); - goto tx_error; + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; } } else { info = &geneve->info; @@ -932,7 +935,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(!err)) return NETDEV_TX_OK; -tx_error: + dev_kfree_skb(skb); if (err == -ELOOP) @@ -1369,21 +1372,33 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], } if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) { +#if IS_ENABLED(CONFIG_IPV6) if (changelink) { attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX; goto change_notsup; } if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) info->key.tun_flags &= ~TUNNEL_CSUM; +#else + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX], + "IPv6 support not enabled in the kernel"); + return -EPFNOSUPPORT; +#endif } if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) { +#if IS_ENABLED(CONFIG_IPV6) if (changelink) { attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX; goto change_notsup; } if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX])) *use_udp6_rx_checksums = false; +#else + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX], + "IPv6 support not enabled in the kernel"); + return -EPFNOSUPPORT; +#endif } return 0; @@ -1559,11 +1574,13 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA)) - goto nla_put_failure; + goto nla_put_failure; +#if IS_ENABLED(CONFIG_IPV6) if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, !geneve->use_udp6_rx_checksums)) goto nla_put_failure; +#endif return 0; diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 92e4e5d53053ffaf46dfe92493f1e4cf969c7c7a..090607e725a244be9a0b1a80ef18d4005a33210f 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1177,11 +1177,11 @@ static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info) static struct genl_family gtp_genl_family; static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, - u32 type, struct pdp_ctx *pctx) + int flags, u32 type, struct pdp_ctx *pctx) { void *genlh; - genlh = genlmsg_put(skb, snd_portid, snd_seq, >p_genl_family, 0, + genlh = genlmsg_put(skb, snd_portid, snd_seq, >p_genl_family, flags, type); if (genlh == NULL) goto nlmsg_failure; @@ -1235,8 +1235,8 @@ static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info) goto err_unlock; } - err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, - info->snd_seq, info->nlhdr->nlmsg_type, pctx); + err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq, + 0, info->nlhdr->nlmsg_type, pctx); if (err < 0) goto err_unlock_free; @@ -1279,6 +1279,7 @@ static int 
gtp_genl_dump_pdp(struct sk_buff *skb, gtp_genl_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + NLM_F_MULTI, cb->nlh->nlmsg_type, pctx)) { cb->args[0] = i; cb->args[1] = j; diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 16a6e11939122aabb922666b9fc6885d05802729..b74c735a423ddc82da8ba6ad00bc67b6e599a03b 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -1162,6 +1162,7 @@ static int __init yam_init_driver(void) err = register_netdev(dev); if (err) { printk(KERN_WARNING "yam: cannot register net device %s\n", dev->name); + free_netdev(dev); goto error; } yam_devs[i] = dev; diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index d7ba2b813effc5d32efcefb8028f7eed723e5e98..40ef4aeb0ef04be65566a803211ae45bf6a1a595 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1250,7 +1250,7 @@ static int rr_open(struct net_device *dev) rrpriv->info = NULL; } if (rrpriv->rx_ctrl) { - pci_free_consistent(pdev, sizeof(struct ring_ctrl), + pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl), rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); rrpriv->rx_ctrl = NULL; } diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 9c9b73f22c0707647ba7a4ce7f8dc96120536a34..e940bb2b6b5db2e53da9557a069ad7d3336309f9 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1236,7 +1236,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) struct crypto_aead *tfm; int ret; - tfm = crypto_alloc_aead("gcm(aes)", 0, 0); + /* Pick a sync gcm(aes) cipher to ensure order is preserved. */ + tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) return tfm; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 3072fc902eca57c79a8130119ddd77d7e40ee42e..b7f41c52766f66866652fe2cf2d1ff43f4b84807 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -449,6 +449,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) int ret; rx_handler_result_t handle_res; + /* Packets from dev_loopback_xmit() do not have L2 header, bail out */ + if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) + return RX_HANDLER_PASS; + port = macvlan_port_get_rcu(skb->dev); if (is_multicast_ether_addr(eth->h_dest)) { unsigned int hash; diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index afebdc2f0b94dfe0bf37417641a4fc24bbfe3b07..5752280fdb4088c3ab3243f78927594a95120763 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1110,7 +1110,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus) goto out; } dp83640_clock_init(clock, bus); - list_add_tail(&phyter_clocks, &clock->list); + list_add_tail(&clock->list, &phyter_clocks); out: mutex_unlock(&phyter_clocks_lock); @@ -1339,6 +1339,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4; dp83640->version = PTP_CLASS_V1; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: @@ -1346,6 +1347,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: @@ -1353,6 +1355,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) 
dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L2; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: @@ -1360,6 +1363,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; default: return -ERANGE; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index cdee06bcd85e3d1272a51b1818cc10c220fc3c37..768eca043f26685ce48d953b940cf5de474649ff 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -724,8 +724,8 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data) int i; for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) { - memcpy(data + i * ETH_GSTRING_LEN, - kszphy_hw_stats[i].string, ETH_GSTRING_LEN); + strlcpy(data + i * ETH_GSTRING_LEN, + kszphy_hw_stats[i].string, ETH_GSTRING_LEN); } } diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 47d2ef2fb9b33102b9b96805b54fa1743ec2aaf1..7989ae4b8387a3f21b6446d13748b3f163671ebf 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1257,9 +1257,11 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) /* Restart autonegotiation so the new modes get sent to the * link partner. */ - ret = phy_restart_aneg(phydev); - if (ret < 0) - return ret; + if (phydev->autoneg == AUTONEG_ENABLE) { + ret = phy_restart_aneg(phydev); + if (ret < 0) + return ret; + } } return 0; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index fa7121dcab6760d749fcc1c7ed693cdf03a2128b..202a0f415e1e200d28bd51fee8e105ae659a9513 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -497,6 +497,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, if (!skb) goto out; + if (skb->pkt_type != PACKET_HOST) + goto abort; + if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto abort; diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index 914cac55a7ae702b5a3a4dc7178effdac1d0eedb..909755ef71ac3edfd60ff9c76b8e0ed5089c1d70 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -210,6 +210,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); if (ret < ETH_ALEN) { netdev_err(dev->net, "Failed to read MAC address: %d\n", ret); + ret = -EIO; goto free; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index c5884c5f04895fdcd98b7ca9aebe98006e6b3630..5a2cdd2ccce6b22ba80a025f6e2c1c70860c214a 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1400,10 +1400,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) } if (pkt_cnt == 0) { - /* Skip IP alignment psudo header */ - skb_pull(skb, 2); skb->len = pkt_len; - skb_set_tail_pointer(skb, pkt_len); + /* Skip IP alignment pseudo header */ + skb_pull(skb, 2); + skb_set_tail_pointer(skb, skb->len); skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(skb, pkt_hdr); return 1; @@ -1412,8 +1412,9 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) ax_skb = skb_clone(skb, GFP_ATOMIC); if (ax_skb) { ax_skb->len = pkt_len; - ax_skb->data = skb->data + 2; - skb_set_tail_pointer(ax_skb, pkt_len); + /* Skip IP alignment pseudo 
header */ + skb_pull(ax_skb, 2); + skb_set_tail_pointer(ax_skb, ax_skb->len); ax_skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(ax_skb, pkt_hdr); usbnet_skb_return(dev, ax_skb); diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 5a3c19c7ad4cd468003521efee1436c77ac4332b..50ba1ae7afdadb0b0d055ebb1a0372776f2e59b4 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -824,14 +824,21 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, -/* Microsoft Surface 3 dock (based on Realtek RTL8153) */ +/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = 0, }, - /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ +/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + +/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index e028e03765a5619835ecef72eb885f79a2cd6f72..88b8ba0ad2cda31738124a0613390cc80d3c5aec 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1249,6 +1249,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ @@ -1283,6 +1284,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ @@ -1292,6 +1294,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index cadf5ded45a9b387219c8f51b77f3eff38ac17b0..e30792380812a5c6850a3ebf499b12945baa3673 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -5329,6 +5329,7 @@ static const struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)}, 
{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)}, + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)}, {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)}, diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index fc48da1c702d7bab7627ea804c50ff52345184c7..bc6bcea67bff32f43e1a42db775bff9cfc36779d 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1301,11 +1301,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) /* Init all registers */ ret = smsc95xx_reset(dev); + if (ret) + goto free_pdata; /* detect device revision as different features may be available */ ret = smsc95xx_read_reg(dev, ID_REV, &val); if (ret < 0) - return ret; + goto free_pdata; + val >>= 16; pdata->chip_id = val; pdata->mdix_ctrl = get_mdix_status(dev->net); @@ -1331,6 +1334,10 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); return 0; + +free_pdata: + kfree(pdata); + return ret; } static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) @@ -1338,7 +1345,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); if (pdata) { - cancel_delayed_work(&pdata->carrier_check); + cancel_delayed_work_sync(&pdata->carrier_check); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); pdata = NULL; diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 2ff27314e04739034cef408b59aed6a77cd98911..66c6c07c7a1662560e99177ea9dfbf427d2c1ba3 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -692,6 +692,8 @@ vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc) *hfunc = ETH_RSS_HASH_TOP; if (!p) return 0; + if (n > UPT1_RSS_MAX_IND_TABLE_SIZE) + return 0; while (n--) p[n] = rssConf->indTable[n]; return 0; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 927d62c76a604c3214846e005e1203e82076061f..afdc2c290fd036d4699c13a297f36e2a3adc8090 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1610,6 +1610,10 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, ns_olen = request->len - skb_network_offset(request) - sizeof(struct ipv6hdr) - sizeof(*ns); for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { + if (!ns->opt[i + 1]) { + kfree_skb(reply); + return NULL; + } if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { daddr = ns->opt + i + sizeof(struct nd_opt_hdr); break; @@ -1962,7 +1966,6 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct dst_entry *ndst; struct flowi6 fl6; - int err; if (!sock6) return ERR_PTR(-EIO); @@ -1985,10 +1988,9 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, fl6.fl6_dport = dport; fl6.fl6_sport = sport; - err = ipv6_stub->ipv6_dst_lookup(vxlan->net, - sock6->sock->sk, - &ndst, &fl6); - if (unlikely(err < 0)) { + ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk, + &fl6, NULL); + if (unlikely(IS_ERR(ndst))) { netdev_dbg(dev, "no route to %pI6\n", daddr); return ERR_PTR(-ENETUNREACH); } diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 0e3f8ed84660e9363552cbf52fc31bff004a08c3..ac34257e9f2036787bdffc512ba47441067cdd46 100644 --- a/drivers/net/wan/lapbether.c +++ 
b/drivers/net/wan/lapbether.c @@ -308,7 +308,6 @@ static void lapbeth_setup(struct net_device *dev) dev->netdev_ops = &lapbeth_netdev_ops; dev->needs_free_netdev = true; dev->type = ARPHRD_X25; - dev->hard_header_len = 3; dev->mtu = 1000; dev->addr_len = 0; } @@ -329,6 +328,14 @@ static int lapbeth_new_device(struct net_device *dev) if (!ndev) goto out; + /* When transmitting data: + * first this driver removes a pseudo header of 1 byte, + * then the lapb module prepends an LAPB header of at most 3 bytes, + * then this driver prepends a length field of 2 bytes, + * then the underlying Ethernet device prepends its own header. + */ + ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len; + lapbeth = netdev_priv(ndev); lapbeth->axdev = ndev; diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 6f669166c2632882280263e29da13a463908a0e4..e80d509bc5415e80a1c5c9f377f2b953a261bf03 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -610,6 +610,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, hif_dev->remain_skb = nskb; spin_unlock(&hif_dev->rx_lock); } else { + if (pool_index == MAX_PKT_NUM_IN_TRANSFER) { + dev_err(&hif_dev->udev->dev, + "ath9k_htc: over RX MAX_PKT_NUM\n"); + goto err; + } nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC); if (!nskb) { dev_err(&hif_dev->udev->dev, @@ -636,9 +641,9 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, static void ath9k_hif_usb_rx_cb(struct urb *urb) { - struct sk_buff *skb = (struct sk_buff *) urb->context; - struct hif_device_usb *hif_dev = - usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); + struct rx_buf *rx_buf = (struct rx_buf *)urb->context; + struct hif_device_usb *hif_dev = rx_buf->hif_dev; + struct sk_buff *skb = rx_buf->skb; int ret; if (!skb) @@ -678,14 +683,15 @@ static void ath9k_hif_usb_rx_cb(struct urb *urb) return; free: kfree_skb(skb); + kfree(rx_buf); } static void ath9k_hif_usb_reg_in_cb(struct urb *urb) { - struct sk_buff *skb = (struct sk_buff *) urb->context; + struct rx_buf *rx_buf = (struct rx_buf *)urb->context; + struct hif_device_usb *hif_dev = rx_buf->hif_dev; + struct sk_buff *skb = rx_buf->skb; struct sk_buff *nskb; - struct hif_device_usb *hif_dev = - usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); int ret; if (!skb) @@ -725,11 +731,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) return; } + rx_buf->skb = nskb; + usb_fill_int_urb(urb, hif_dev->udev, usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), nskb->data, MAX_REG_IN_BUF_SIZE, - ath9k_hif_usb_reg_in_cb, nskb, 1); + ath9k_hif_usb_reg_in_cb, rx_buf, 1); } resubmit: @@ -743,6 +751,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) return; free: kfree_skb(skb); + kfree(rx_buf); urb->context = NULL; } @@ -788,7 +797,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev) init_usb_anchor(&hif_dev->mgmt_submitted); for (i = 0; i < MAX_TX_URB_NUM; i++) { - tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL); + tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL); if (!tx_buf) goto err; @@ -825,8 +834,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev) static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) { - struct urb *urb = NULL; + struct rx_buf *rx_buf = NULL; struct sk_buff *skb = NULL; + struct urb *urb = NULL; int i, ret; init_usb_anchor(&hif_dev->rx_submitted); @@ -834,6 +844,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) 
for (i = 0; i < MAX_RX_URB_NUM; i++) { + rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); + if (!rx_buf) { + ret = -ENOMEM; + goto err_rxb; + } + /* Allocate URB */ urb = usb_alloc_urb(0, GFP_KERNEL); if (urb == NULL) { @@ -848,11 +864,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) goto err_skb; } + rx_buf->hif_dev = hif_dev; + rx_buf->skb = skb; + usb_fill_bulk_urb(urb, hif_dev->udev, usb_rcvbulkpipe(hif_dev->udev, USB_WLAN_RX_PIPE), skb->data, MAX_RX_BUF_SIZE, - ath9k_hif_usb_rx_cb, skb); + ath9k_hif_usb_rx_cb, rx_buf); /* Anchor URB */ usb_anchor_urb(urb, &hif_dev->rx_submitted); @@ -878,6 +897,8 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) err_skb: usb_free_urb(urb); err_urb: + kfree(rx_buf); +err_rxb: ath9k_hif_usb_dealloc_rx_urbs(hif_dev); return ret; } @@ -889,14 +910,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev) static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) { - struct urb *urb = NULL; + struct rx_buf *rx_buf = NULL; struct sk_buff *skb = NULL; + struct urb *urb = NULL; int i, ret; init_usb_anchor(&hif_dev->reg_in_submitted); for (i = 0; i < MAX_REG_IN_URB_NUM; i++) { + rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); + if (!rx_buf) { + ret = -ENOMEM; + goto err_rxb; + } + /* Allocate URB */ urb = usb_alloc_urb(0, GFP_KERNEL); if (urb == NULL) { @@ -911,11 +939,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) goto err_skb; } + rx_buf->hif_dev = hif_dev; + rx_buf->skb = skb; + usb_fill_int_urb(urb, hif_dev->udev, usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), skb->data, MAX_REG_IN_BUF_SIZE, - ath9k_hif_usb_reg_in_cb, skb, 1); + ath9k_hif_usb_reg_in_cb, rx_buf, 1); /* Anchor URB */ usb_anchor_urb(urb, &hif_dev->reg_in_submitted); @@ -941,6 +972,8 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) err_skb: usb_free_urb(urb); err_urb: + kfree(rx_buf); +err_rxb: ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); return ret; } @@ -971,7 +1004,7 @@ static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev) return -ENOMEM; } -static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) +void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) { usb_kill_anchored_urbs(&hif_dev->regout_submitted); ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); @@ -1339,8 +1372,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface) if (hif_dev->flags & HIF_USB_READY) { ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); - ath9k_htc_hw_free(hif_dev->htc_handle); ath9k_hif_usb_dev_deinit(hif_dev); + ath9k_destoy_wmi(hif_dev->htc_handle->drv_priv); + ath9k_htc_hw_free(hif_dev->htc_handle); } usb_set_intfdata(interface, NULL); diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h index 7846916aa01df2a00a5f2a7a6332b514190c5114..5985aa15ca931386d58e06fea36402f10835936d 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.h +++ b/drivers/net/wireless/ath/ath9k/hif_usb.h @@ -86,6 +86,11 @@ struct tx_buf { struct list_head list; }; +struct rx_buf { + struct sk_buff *skb; + struct hif_device_usb *hif_dev; +}; + #define HIF_USB_TX_STOP BIT(0) #define HIF_USB_TX_FLUSH BIT(1) @@ -133,5 +138,6 @@ struct hif_device_usb { int ath9k_hif_usb_init(void); void ath9k_hif_usb_exit(void); +void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev); #endif /* HTC_USB_H */ diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c 
b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index da2164b0cccc3890c100786c334d6c3cc3923ec8..66ef5cf16450d103a84e89223d7b4d00d77b0b05 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -933,8 +933,9 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv, int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, u16 devid, char *product, u32 drv_info) { - struct ieee80211_hw *hw; + struct hif_device_usb *hif_dev; struct ath9k_htc_priv *priv; + struct ieee80211_hw *hw; int ret; hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops); @@ -969,7 +970,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, return 0; err_init: - ath9k_deinit_wmi(priv); + ath9k_stop_wmi(priv); + hif_dev = (struct hif_device_usb *)htc_handle->hif_dev; + ath9k_hif_usb_dealloc_urbs(hif_dev); + ath9k_destoy_wmi(priv); err_free: ieee80211_free_hw(hw); return ret; @@ -984,7 +988,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug) htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED; ath9k_deinit_device(htc_handle->drv_priv); - ath9k_deinit_wmi(htc_handle->drv_priv); + ath9k_stop_wmi(htc_handle->drv_priv); ieee80211_free_hw(htc_handle->drv_priv->hw); } } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 4748f557c75363983806b4d82830d32d0e1f4a24..11d06021b5e4d3318bc22c7f57e11e8f5cb3fae1 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -999,9 +999,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, * which are not PHY_ERROR (short radar pulses have a length of 3) */ if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) { - ath_warn(common, - "Short RX data len, dropping (dlen: %d)\n", - rs_datalen); + ath_dbg(common, ANY, + "Short RX data len, dropping (dlen: %d)\n", + rs_datalen); goto rx_next; } diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 1bf63a4efb4c84f09fe0ae6b213df849202bade7..d2e062eaf56149b3ea76d452d75ecf81f9154a1b 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -113,6 +113,9 @@ static void htc_process_conn_rsp(struct htc_target *target, if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) { epid = svc_rspmsg->endpoint_id; + if (epid < 0 || epid >= ENDPOINT_MAX) + return; + service_id = be16_to_cpu(svc_rspmsg->service_id); max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len); endpoint = &target->endpoint[epid]; diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index 64a354fa78ab2f79b7811e7119886f7c463473cc..f57f48e4d7a0ab488e5a2dd685689b83495598f6 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -112,14 +112,17 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv) return wmi; } -void ath9k_deinit_wmi(struct ath9k_htc_priv *priv) +void ath9k_stop_wmi(struct ath9k_htc_priv *priv) { struct wmi *wmi = priv->wmi; mutex_lock(&wmi->op_mutex); wmi->stopped = true; mutex_unlock(&wmi->op_mutex); +} +void ath9k_destoy_wmi(struct ath9k_htc_priv *priv) +{ kfree(priv->wmi); } diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h index 380175d5ecd7a7d54e22d27fb434efa65dfcdd10..d8b9122062324cf244af2eb7dcd4f8e2ef5ebb42 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.h +++ 
b/drivers/net/wireless/ath/ath9k/wmi.h @@ -179,7 +179,6 @@ struct wmi { }; struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv); -void ath9k_deinit_wmi(struct ath9k_htc_priv *priv); int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi, enum htc_endpoint_id *wmi_ctrl_epid); int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, @@ -189,6 +188,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, void ath9k_wmi_event_tasklet(unsigned long data); void ath9k_fatal_work(struct work_struct *work); void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv); +void ath9k_stop_wmi(struct ath9k_htc_priv *priv); +void ath9k_destoy_wmi(struct ath9k_htc_priv *priv); #define WMI_CMD(_wmi_cmd) \ do { \ diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c index 88045f93a76c1971febec6587f404e1bf0a5d181..62ed0977f32cb872043aafcddc3a4940f521634a 100644 --- a/drivers/net/wireless/ath/carl9170/fw.c +++ b/drivers/net/wireless/ath/carl9170/fw.c @@ -351,9 +351,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); if (SUPP(CARL9170FW_WLANTX_CAB)) { - if_comb_types |= - BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_GO); + if_comb_types |= BIT(NL80211_IFTYPE_AP); #ifdef CONFIG_MAC80211_MESH if_comb_types |= diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 988c8857d78c9d21abbe3b007ce605b13e04b109..80312b2fddb1ba6ee3211e77bbe6a2171ff152fb 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -582,11 +582,10 @@ static int carl9170_init_interface(struct ar9170 *ar, ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) && (vif->type != NL80211_IFTYPE_AP)); - /* While the driver supports HW offload in a single - * P2P client configuration, it doesn't support HW - * offload in the favourit, concurrent P2P GO+CLIENT - * configuration. Hence, HW offload will always be - * disabled for P2P. + /* The driver used to have P2P GO+CLIENT support, + * but since this was dropped and we don't know if + * there are any gremlins lurking in the shadows, + * so best we keep HW offload disabled for P2P. */ ar->disable_offload |= vif->p2p; @@ -639,18 +638,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_STATION) break; - /* P2P GO [master] use-case - * Because the P2P GO station is selected dynamically - * by all participating peers of a WIFI Direct network, - * the driver has be able to change the main interface - * operating mode on the fly. 
- */ - if (main_vif->p2p && vif->p2p && - vif->type == NL80211_IFTYPE_AP) { - old_main = main_vif; - break; - } - err = -EBUSY; rcu_read_unlock(); diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index af37c19dbfd7e02d4035cd6085cb06d55204949f..688152bcfc15cedde635b38f3ce213e553c56a6b 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -1280,7 +1280,7 @@ static int wcn36xx_probe(struct platform_device *pdev) if (addr && ret != ETH_ALEN) { wcn36xx_err("invalid local-mac-address\n"); ret = -EINVAL; - goto out_wq; + goto out_destroy_ept; } else if (addr) { wcn36xx_info("mac address: %pM\n", addr); SET_IEEE80211_PERM_ADDR(wcn->hw, addr); @@ -1288,7 +1288,7 @@ static int wcn36xx_probe(struct platform_device *pdev) ret = wcn36xx_platform_get_resources(wcn, pdev); if (ret) - goto out_wq; + goto out_destroy_ept; wcn36xx_init_ieee80211(wcn); ret = ieee80211_register_hw(wcn->hw); @@ -1300,6 +1300,8 @@ static int wcn36xx_probe(struct platform_device *pdev) out_unmap: iounmap(wcn->ccu_base); iounmap(wcn->dxe_base); +out_destroy_ept: + rpmsg_destroy_ept(wcn->smd_channel); out_wq: ieee80211_free_hw(hw); out_err: diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index b37e7391f55defb4ded9635bdc15f55117789ba2..8a226a9d755e2d97926976a726356b4c366448fc 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -5596,7 +5596,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) /* fill hw info */ ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, SIGNAL_DBM); - + ieee80211_hw_set(hw, MFP_CAPABLE); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT) | diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index f435bd0f8b5b5efba00f8a7c17c4b18323d3987e..6f123a52ae2dc6d2c26e197bbc2b60b5e466f5ba 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -3835,6 +3835,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev) /* fill hw info */ ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, MFP_CAPABLE); /* Allow WPA3 in software */ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c index 35ccf400b02c2f51004176bc8647153890605fd9..87045e30e58503856e5061ed2e870405c838bef9 100644 --- a/drivers/net/wireless/broadcom/b43legacy/xmit.c +++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c @@ -571,6 +571,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev, default: b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n", chanstat); + goto drop; } memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 53ae3025998999e26f2f06307fe4abd6f6343dc9..473b2b3cb6f556aa2ce6861fd15accf79e23f43a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -192,13 +192,14 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) if (!err) ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC); + brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa"); + if (drvr->settings->feature_disable) { 
brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n", ifp->drvr->feat_flags, drvr->settings->feature_disable); ifp->drvr->feat_flags &= ~drvr->settings->feature_disable; } - brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa"); /* set chip related quirks */ switch (drvr->bus_if->chip) { diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index f3f20abbe2696a057d724ffecea87937676be57e..d06dc446a393129f7b393ada86d96e5566aac713 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -1928,6 +1928,10 @@ static netdev_tx_t mpi_start_xmit(struct sk_buff *skb, airo_print_err(dev->name, "%s: skb == NULL!",__func__); return NETDEV_TX_OK; } + if (skb_padto(skb, ETH_ZLEN)) { + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } npacks = skb_queue_len (&ai->txq); if (npacks >= MAXTXQ - 1) { @@ -2130,6 +2134,10 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb, airo_print_err(dev->name, "%s: skb == NULL!", __func__); return NETDEV_TX_OK; } + if (skb_padto(skb, ETH_ZLEN)) { + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } /* Find a vacant FID */ for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ ); @@ -2204,6 +2212,10 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb, airo_print_err(dev->name, "%s: skb == NULL!", __func__); return NETDEV_TX_OK; } + if (skb_padto(skb, ETH_ZLEN)) { + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } /* Find a vacant FID */ for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ ); diff --git a/drivers/net/wireless/cnss2/Kconfig b/drivers/net/wireless/cnss2/Kconfig index 209d315dba3087f27fd86ecbf673301b6816f122..f36cc1b9c298d387e94c78181f9bd96318271020 100644 --- a/drivers/net/wireless/cnss2/Kconfig +++ b/drivers/net/wireless/cnss2/Kconfig @@ -72,3 +72,9 @@ config CNSS_EMULATION emulation hardware. These changes are needed for WLAN drivers to support and meet the requirement of emulation hardware. + +config CNSS_SUPPORT_DUAL_DEV + bool "Enable cnss support dual wlan card" + ---help--- + This enables the changes from cnss platform driver to support dual + wlan card attach \ No newline at end of file diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c index 813129843e5b92523e723930f4317ca3eb6936bb..1b250cc7ed9001f6231a7a7bbc639d8acf1b379b 100644 --- a/drivers/net/wireless/cnss2/debug.c +++ b/drivers/net/wireless/cnss2/debug.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -681,8 +681,14 @@ int cnss_debugfs_create(struct cnss_plat_data *plat_priv) { int ret = 0; struct dentry *root_dentry; + char name[15]; + + if (cnss_get_dual_wlan()) + snprintf(name, sizeof(name), "cnss_%d", plat_priv->idx); + else + snprintf(name, sizeof(name), "cnss"); + root_dentry = debugfs_create_dir(name, NULL); - root_dentry = debugfs_create_dir("cnss", 0); if (IS_ERR(root_dentry)) { ret = PTR_ERR(root_dentry); cnss_pr_err("Unable to create debugfs %d\n", ret); diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c index 4b2005b44ee642b44dbfbeae094c4ff1c532f6ba..cc39db85c446e8a916af1236c876110770322205 100644 --- a/drivers/net/wireless/cnss2/main.c +++ b/drivers/net/wireless/cnss2/main.c @@ -47,7 +47,20 @@ #define CNSS_QMI_TIMEOUT_DEFAULT 10000 #define CNSS_BDF_TYPE_DEFAULT CNSS_BDF_ELF +#ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV +#define CNSS_DUAL_WLAN 1 +#else +#define CNSS_DUAL_WLAN 0 +#endif + +#ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV +static struct cnss_plat_data *plat_env[CNSS_MAX_DEV_NUM]; +static int plat_env_count; +#else static struct cnss_plat_data *plat_env; +#endif + +static bool pm_notify_registered; static DECLARE_RWSEM(cnss_pm_sem); @@ -70,6 +83,80 @@ struct cnss_driver_event { void *data; }; +#ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV +static void cnss_set_plat_priv(struct platform_device *plat_dev, + struct cnss_plat_data *plat_priv) +{ + cnss_pr_dbg("Set plat_priv at %d", plat_env_count); + if (plat_priv) { + plat_priv->idx = plat_env_count; + plat_env[plat_priv->idx] = plat_priv; + plat_env_count++; + } +} + +struct cnss_plat_data *cnss_get_plat_priv(struct platform_device + *plat_dev) +{ + int i; + + if (!plat_dev) + return NULL; + + for (i = 0; i < plat_env_count; i++) { + if (plat_env[i]->plat_dev == plat_dev) + return plat_env[i]; + } + return NULL; +} + +static void cnss_clear_plat_priv(struct cnss_plat_data *plat_priv) +{ + cnss_pr_dbg("Clear plat_priv at %d", plat_priv->idx); + plat_env[plat_priv->idx] = NULL; + plat_env_count--; +} + +static int cnss_set_device_name(struct cnss_plat_data *plat_priv) +{ + snprintf(plat_priv->device_name, sizeof(plat_priv->device_name), + "wlan_%d", plat_priv->idx); + + return 0; +} + +static int cnss_plat_env_available(void) +{ + int ret = 0; + + if (plat_env_count >= CNSS_MAX_DEV_NUM) { + cnss_pr_err("ERROR: No space to store plat_priv\n"); + ret = -ENOMEM; + } + return ret; +} + +int cnss_get_plat_env_count(void) +{ + return plat_env_count; +} + +struct cnss_plat_data *cnss_get_plat_env(int index) +{ + return plat_env[index]; +} + +struct cnss_plat_data *cnss_get_plat_priv_by_rc_num(int rc_num) +{ + int i; + + for (i = 0; i < plat_env_count; i++) { + if (plat_env[i]->rc_num == rc_num) + return plat_env[i]; + } + return NULL; +} +#else static void cnss_set_plat_priv(struct platform_device *plat_dev, struct cnss_plat_data *plat_priv) { @@ -81,6 +168,35 @@ struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev) return plat_env; } +static void cnss_clear_plat_priv(struct cnss_plat_data *plat_priv) +{ + plat_env = NULL; +} + +static int cnss_set_device_name(struct cnss_plat_data *plat_priv) +{ + snprintf(plat_priv->device_name, sizeof(plat_priv->device_name), + "wlan"); + return 0; +} + +static int cnss_plat_env_available(void) +{ + return 0; +} + +struct cnss_plat_data *cnss_get_plat_priv_by_rc_num(int rc_num) +{ + return cnss_bus_dev_to_plat_priv(NULL); 
+} + +#endif + +bool cnss_get_dual_wlan(void) +{ + return CNSS_DUAL_WLAN; +} + static int cnss_pm_notify(struct notifier_block *b, unsigned long event, void *p) { @@ -1550,7 +1666,7 @@ int cnss_register_subsys(struct cnss_plat_data *plat_priv) subsys_info = &plat_priv->subsys_info; - subsys_info->subsys_desc.name = "wlan"; + subsys_info->subsys_desc.name = plat_priv->device_name; subsys_info->subsys_desc.owner = THIS_MODULE; subsys_info->subsys_desc.powerup = cnss_subsys_powerup; subsys_info->subsys_desc.shutdown = cnss_subsys_shutdown; @@ -1926,7 +2042,10 @@ static int cnss_misc_init(struct cnss_plat_data *plat_priv) setup_timer(&plat_priv->fw_boot_timer, cnss_bus_fw_boot_timeout_hdlr, (unsigned long)plat_priv); - register_pm_notifier(&cnss_pm_notifier); + if (!pm_notify_registered) { + register_pm_notifier(&cnss_pm_notifier); + pm_notify_registered = true; + } ret = device_init_wakeup(&plat_priv->plat_dev->dev, true); if (ret) @@ -1949,7 +2068,10 @@ static void cnss_misc_deinit(struct cnss_plat_data *plat_priv) complete_all(&plat_priv->cal_complete); complete_all(&plat_priv->power_up_complete); device_init_wakeup(&plat_priv->plat_dev->dev, false); - unregister_pm_notifier(&cnss_pm_notifier); + if (pm_notify_registered) { + unregister_pm_notifier(&cnss_pm_notifier); + pm_notify_registered = false; + } del_timer(&plat_priv->fw_boot_timer); } @@ -2042,6 +2164,20 @@ cnss_is_converged_dt(struct cnss_plat_data *plat_priv) "qcom,converged-dt"); } +static inline int +cnss_get_rc_num(struct cnss_plat_data *plat_priv) +{ + return of_property_read_u32(plat_priv->plat_dev->dev.of_node, + "qcom,wlan-rc-num", &plat_priv->rc_num); +} + +static inline int +cnss_get_qrtr_node_id(struct cnss_plat_data *plat_priv) +{ + return of_property_read_u32(plat_priv->plat_dev->dev.of_node, + "qcom,qrtr_node_id", &plat_priv->qrtr_node_id); +} + static int cnss_probe(struct platform_device *plat_dev) { int ret = 0; @@ -2054,6 +2190,9 @@ static int cnss_probe(struct platform_device *plat_dev) ret = -EEXIST; goto out; } + ret = cnss_plat_env_available(); + if (ret != 0) + goto out; of_id = of_match_device(cnss_of_match_table, &plat_dev->dev); if (!of_id || !of_id->data) { @@ -2074,12 +2213,31 @@ static int cnss_probe(struct platform_device *plat_dev) plat_priv->plat_dev = plat_dev; plat_priv->dev_node = NULL; plat_priv->device_id = device_id->driver_data; + + ret = cnss_get_rc_num(plat_priv); + if (ret) + cnss_pr_err("Failed to find PCIe RC number, err = %d\n", ret); + cnss_pr_dbg("%s: rc_num=%d\n", __func__, plat_priv->rc_num); + + ret = cnss_get_qrtr_node_id(plat_priv); + if (ret) { + cnss_pr_dbg("Failed to find qrtr_node_id err=%d\n", ret); + plat_priv->qrtr_node_id = 0; + plat_priv->wlfw_service_instance_id = 0; + } else { + plat_priv->wlfw_service_instance_id = plat_priv->qrtr_node_id + + FW_ID_BASE; + cnss_pr_dbg("service_instance_id=0x%x\n", + plat_priv->wlfw_service_instance_id); + } + plat_priv->is_converged_dt = cnss_is_converged_dt(plat_priv); cnss_pr_dbg("Probing platform driver from %s DT\n", plat_priv->is_converged_dt ? 
"converged" : "single"); plat_priv->bus_type = cnss_get_bus_type(plat_priv); cnss_set_plat_priv(plat_dev, plat_priv); + cnss_set_device_name(plat_priv); platform_set_drvdata(plat_dev, plat_priv); INIT_LIST_HEAD(&plat_priv->vreg_list); @@ -2128,10 +2286,6 @@ static int cnss_probe(struct platform_device *plat_dev) if (ret) goto destroy_debugfs; - ret = cnss_genl_init(); - if (ret < 0) - cnss_pr_err("CNSS genl init failed %d\n", ret); - cnss_pr_info("Platform driver probed successfully.\n"); return 0; @@ -2158,7 +2312,7 @@ static int cnss_probe(struct platform_device *plat_dev) cnss_put_resources(plat_priv); reset_ctx: platform_set_drvdata(plat_dev, NULL); - cnss_set_plat_priv(plat_dev, NULL); + cnss_clear_plat_priv(plat_priv); out: return ret; } @@ -2167,7 +2321,6 @@ static int cnss_remove(struct platform_device *plat_dev) { struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev); - cnss_genl_exit(); cnss_misc_deinit(plat_priv); cnss_debugfs_destroy(plat_priv); cnss_qmi_deinit(plat_priv); @@ -2178,7 +2331,7 @@ static int cnss_remove(struct platform_device *plat_dev) cnss_bus_deinit(plat_priv); cnss_put_resources(plat_priv); platform_set_drvdata(plat_dev, NULL); - plat_env = NULL; + cnss_clear_plat_priv(plat_priv); return 0; } @@ -2204,6 +2357,9 @@ static int __init cnss_initialize(void) ret = platform_driver_register(&cnss_platform_driver); if (ret) cnss_debug_deinit(); + ret = cnss_genl_init(); + if (ret < 0) + cnss_pr_err("CNSS genl init failed %d\n", ret); return ret; } @@ -2212,6 +2368,7 @@ static void __exit cnss_exit(void) { platform_driver_unregister(&cnss_platform_driver); cnss_debug_deinit(); + cnss_genl_exit(); } module_init(cnss_initialize); diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h index b4795eb848d34487e9c8cfb52932e796add2a647..dde407e1d9796d4beac29666d30fdf5e26dcc0e5 100644 --- a/drivers/net/wireless/cnss2/main.h +++ b/drivers/net/wireless/cnss2/main.h @@ -37,6 +37,8 @@ #define CNSS_FW_PATH_MAX_LEN 32 +#define CNSS_MAX_DEV_NUM 2 + enum cnss_dev_bus_type { CNSS_BUS_NONE = -1, CNSS_BUS_PCI, @@ -267,6 +269,7 @@ enum cnss_ce_index { struct cnss_plat_data { struct platform_device *plat_dev; + enum cnss_driver_mode driver_mode; void *bus_priv; enum cnss_dev_bus_type bus_type; struct list_head vreg_list; @@ -320,9 +323,20 @@ struct cnss_plat_data { u8 set_wlaon_pwr_ctrl; bool fw_pcie_gen_switch; u8 pcie_gen_speed; + u32 rc_num; + char device_name[16]; + u32 idx; + bool enumerate_done; + int qrtr_node_id; + unsigned int wlfw_service_instance_id; }; struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev); +struct cnss_plat_data *cnss_get_plat_priv_by_rc_num(int rc_num); +int cnss_get_plat_env_count(void); +struct cnss_plat_data *cnss_get_plat_env(int index); +bool cnss_get_dual_wlan(void); + int cnss_driver_event_post(struct cnss_plat_data *plat_priv, enum cnss_driver_event_type type, u32 flags, void *data); diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index 7946cc953753718c7398b68a83a1f8dc90a5535a..7f3d6685070f2ae78b64251bb04f5a6f5ebef8a4 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -47,6 +47,8 @@ #define DEFAULT_M3_FILE_NAME "m3.bin" #define DEFAULT_FW_FILE_NAME "amss.bin" #define FW_V2_FILE_NAME "amss20.bin" +#define DEFAULT_GENOA_FW_FTM_NAME "genoaftm.bin" + #define FW_V2_NUMBER 2 #define WAKE_MSI_NAME "WAKE" @@ -60,6 +62,8 @@ #define EMULATION_HW 0 #endif +static bool cnss_driver_registered; + static 
DEFINE_SPINLOCK(pci_link_down_lock); static DEFINE_SPINLOCK(pci_reg_window_lock); @@ -137,12 +141,15 @@ static DEFINE_SPINLOCK(pci_reg_window_lock); #define FORCE_WAKE_DELAY_TIMEOUT_US 60000 #define QCA6390_WLAON_QFPROM_PWR_CTRL_REG 0x1F8031C +#define QCA6390_PCIE_SCRATCH_0_SOC_PCIE_REG 0x1E04040 #define POWER_ON_RETRY_MAX_TIMES 3 #define POWER_ON_RETRY_DELAY_MS 200 #define LINK_TRAINING_RETRY_MAX_TIMES 3 +static void cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv); + static struct cnss_pci_reg ce_src[] = { { "SRC_RING_BASE_LSB", QCA6390_CE_SRC_RING_BASE_LSB_OFFSET }, { "SRC_RING_BASE_MSB", QCA6390_CE_SRC_RING_BASE_MSB_OFFSET }, @@ -1236,13 +1243,72 @@ int cnss_pci_is_drv_connected(struct device *dev) } EXPORT_SYMBOL(cnss_pci_is_drv_connected); +#ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV +static struct cnss_plat_data *cnss_get_plat_priv_by_driver_ops( + struct cnss_wlan_driver *driver_ops) +{ + int plat_env_count = cnss_get_plat_env_count(); + struct cnss_plat_data *plat_env; + struct cnss_pci_data *pci_priv; + int i = 0; + + if (!driver_ops) { + cnss_pr_err("No cnss driver\n"); + return NULL; + } + + for (i = 0; i < plat_env_count; i++) { + plat_env = cnss_get_plat_env(i); + + if (!plat_env) + continue; + + pci_priv = plat_env->bus_priv; + if (!pci_priv) { + cnss_pr_err("pci_priv is NULL\n"); + continue; + } + + if (driver_ops == pci_priv->driver_ops) + return plat_env; + } + /* Doesn't find the existing instance, + * so return the fist empty instance + */ + for (i = 0; i < plat_env_count; i++) { + plat_env = cnss_get_plat_env(i); + + if (!plat_env) + continue; + pci_priv = plat_env->bus_priv; + if (!pci_priv) { + cnss_pr_err("pci_priv is NULL\n"); + continue; + } + + if (!pci_priv->driver_ops) + return plat_env; + } + + return NULL; +} + +#else +static struct cnss_plat_data *cnss_get_plat_priv_by_driver_ops( + struct cnss_wlan_driver *driver_ops) +{ + return cnss_bus_dev_to_plat_priv(NULL); +} +#endif + int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops) { int ret = 0; - struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL); + struct cnss_plat_data *plat_priv; struct cnss_pci_data *pci_priv; unsigned int timeout; + plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops); if (!plat_priv) { cnss_pr_err("plat_priv is NULL\n"); return -ENODEV; @@ -1259,6 +1325,11 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops) return -EEXIST; } + if (driver_ops->get_driver_mode) { + plat_priv->driver_mode = driver_ops->get_driver_mode(); + cnss_pci_update_fw_name(pci_priv); + } + if (!test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state)) goto register_driver; @@ -1286,9 +1357,10 @@ EXPORT_SYMBOL(cnss_wlan_register_driver); void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops) { - struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL); + struct cnss_plat_data *plat_priv; int ret = 0; + plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops); if (!plat_priv) { cnss_pr_err("plat_priv is NULL\n"); return; @@ -1989,7 +2061,14 @@ int cnss_pci_force_wake_request_sync(struct device *dev, int timeout_us) if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) return -EAGAIN; - return mhi_device_get_sync_atomic(mhi_ctrl->mhi_dev, timeout_us); + if (timeout_us) { + /* Busy wait for timeout_us */ + return mhi_device_get_sync_atomic(mhi_ctrl->mhi_dev, + timeout_us); + } else { + /* Sleep wait for mhi_ctrl->timeout_ms */ + return mhi_device_get_sync(mhi_ctrl->mhi_dev, MHI_VOTE_DEVICE); + } } 
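/*
 * A minimal caller sketch (hypothetical helper, not from the patch): with the
 * cnss_pci_force_wake_request_sync() change just above, the timeout argument
 * now selects the MHI wake path.  FORCE_WAKE_DELAY_TIMEOUT_US is the 60000us
 * budget already defined in this file; the helper name is an assumption.
 */
static int cnss_pci_force_wake_example(struct device *dev, bool atomic_ctx)
{
	if (atomic_ctx)
		/* Non-zero timeout: busy-wait via mhi_device_get_sync_atomic() */
		return cnss_pci_force_wake_request_sync(dev,
					FORCE_WAKE_DELAY_TIMEOUT_US);

	/* Zero timeout: sleeping wait via mhi_device_get_sync() */
	return cnss_pci_force_wake_request_sync(dev, 0);
}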
EXPORT_SYMBOL(cnss_pci_force_wake_request_sync); @@ -1997,6 +2076,7 @@ int cnss_pci_force_wake_request(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); + struct cnss_plat_data *plat_priv; struct mhi_controller *mhi_ctrl; if (!pci_priv) @@ -2009,9 +2089,14 @@ int cnss_pci_force_wake_request(struct device *dev) if (!mhi_ctrl) return -EINVAL; - read_lock_bh(&mhi_ctrl->pm_lock); - mhi_ctrl->wake_get(mhi_ctrl, true); - read_unlock_bh(&mhi_ctrl->pm_lock); + plat_priv = pci_priv->plat_priv; + if (!plat_priv) + return -ENODEV; + + if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) + return -EAGAIN; + + mhi_device_get(mhi_ctrl->mhi_dev, MHI_VOTE_DEVICE); return 0; } @@ -2041,6 +2126,7 @@ int cnss_pci_force_wake_release(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); + struct cnss_plat_data *plat_priv; struct mhi_controller *mhi_ctrl; if (!pci_priv) @@ -2053,9 +2139,14 @@ int cnss_pci_force_wake_release(struct device *dev) if (!mhi_ctrl) return -EINVAL; - read_lock_bh(&mhi_ctrl->pm_lock); - mhi_ctrl->wake_put(mhi_ctrl, false); - read_unlock_bh(&mhi_ctrl->pm_lock); + plat_priv = pci_priv->plat_priv; + if (!plat_priv) + return -ENODEV; + + if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) + return -EAGAIN; + + mhi_device_put(mhi_ctrl->mhi_dev, MHI_VOTE_DEVICE); return 0; } @@ -2261,10 +2352,11 @@ int cnss_smmu_map(struct device *dev, } len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE); - iova = roundup(pci_priv->smmu_iova_ipa_start, PAGE_SIZE); + iova = roundup(pci_priv->smmu_iova_ipa_current, PAGE_SIZE); - if (iova >= - (pci_priv->smmu_iova_ipa_start + pci_priv->smmu_iova_ipa_len)) { + if (pci_priv->iommu_geometry && + iova >= pci_priv->smmu_iova_ipa_start + + pci_priv->smmu_iova_ipa_len) { cnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n", iova, &pci_priv->smmu_iova_ipa_start, @@ -2272,6 +2364,8 @@ int cnss_smmu_map(struct device *dev, return -ENOMEM; } + cnss_pr_dbg("IOMMU map: iova %lx len %zu\n", iova, len); + ret = iommu_map(pci_priv->smmu_mapping->domain, iova, rounddown(paddr, PAGE_SIZE), len, IOMMU_READ | IOMMU_WRITE); @@ -2280,13 +2374,50 @@ int cnss_smmu_map(struct device *dev, return ret; } - pci_priv->smmu_iova_ipa_start = iova + len; + pci_priv->smmu_iova_ipa_current = iova + len; *iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE)); + cnss_pr_dbg("IOMMU map: iova_addr %lx", *iova_addr); return 0; } EXPORT_SYMBOL(cnss_smmu_map); +int cnss_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size) +{ + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev)); + unsigned long iova; + size_t unmapped; + size_t len; + + if (!pci_priv) + return -ENODEV; + + iova = rounddown(iova_addr, PAGE_SIZE); + len = roundup(size + iova_addr - iova, PAGE_SIZE); + + if (iova >= pci_priv->smmu_iova_ipa_start + + pci_priv->smmu_iova_ipa_len) { + cnss_pr_err("Out of IOVA space to unmap, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n", + iova, + &pci_priv->smmu_iova_ipa_start, + pci_priv->smmu_iova_ipa_len); + return -ENOMEM; + } + + cnss_pr_dbg("IOMMU unmap: iova %lx len %zu\n", iova, len); + + unmapped = iommu_unmap(pci_priv->smmu_mapping->domain, iova, len); + if (unmapped != len) { + cnss_pr_err("IOMMU unmap failed, unmapped = %zu, requested = %zu\n", + unmapped, len); + return -EINVAL; + } + + pci_priv->smmu_iova_ipa_current 
= iova; + return 0; +} +EXPORT_SYMBOL(cnss_smmu_unmap); + int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info) { struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev)); @@ -2734,6 +2865,7 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic) if (cnss_pci_check_link_status(pci_priv)) return; + cnss_pci_dump_shadow_reg(pci_priv); cnss_pci_dump_qdss_reg(pci_priv); ret = mhi_download_rddm_img(pci_priv->mhi_ctrl, in_panic); @@ -2948,7 +3080,21 @@ static void cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv) "%s" FW_V2_FILE_NAME, cnss_get_fw_path(plat_priv)); mhi_ctrl->fw_image = plat_priv->firmware_name; } - + if (pci_priv->device_id == QCN7605_DEVICE_ID) { + if (plat_priv->driver_mode == CNSS_FTM) { + snprintf(plat_priv->firmware_name, + sizeof(plat_priv->firmware_name), + "%s" DEFAULT_GENOA_FW_FTM_NAME, + cnss_get_fw_path(plat_priv)); + mhi_ctrl->fw_image = plat_priv->firmware_name; + } else { + snprintf(plat_priv->firmware_name, + sizeof(plat_priv->firmware_name), + "%s" DEFAULT_FW_FILE_NAME, + cnss_get_fw_path(plat_priv)); + mhi_ctrl->fw_image = plat_priv->firmware_name; + } + } cnss_pr_dbg("Firmware name is %s\n", mhi_ctrl->fw_image); } @@ -3216,6 +3362,44 @@ int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv) if (ret) goto out; + /** + * in the single wlan chipset case, plat_priv->qrtr_node_id always is 0, + * wlan fw will use the hardcode 7 as the qrtr node id. + * in the dual Hastings case, we will read qrtr node id + * from device tree and pass to get plat_priv->qrtr_node_id, + * which always is not zero. And then store this new value + * to pcie register, wlan fw will read out this qrtr node id + * from this register and overwrite to the hardcode one + * while do initialization for ipc router. + * without this change, two Hastings will use the same + * qrtr node instance id, which will mess up qmi message + * exchange. According to qrtr spec, every node should + * have unique qrtr node id + */ + if (plat_priv->device_id == QCA6390_DEVICE_ID && + plat_priv->qrtr_node_id) { + u32 val; + + cnss_pr_dbg("write 0x%x to QCA6390_PCIE_SCRATCH_0_SOC_PCIE_REG\n", + plat_priv->qrtr_node_id); + ret = cnss_pci_reg_write(pci_priv, + QCA6390_PCIE_SCRATCH_0_SOC_PCIE_REG, + plat_priv->qrtr_node_id); + if (ret) { + cnss_pr_err("Failed to write register offset 0x%x, err = %d\n", + QCA6390_PCIE_SCRATCH_0_SOC_PCIE_REG, ret); + goto out; + } + if (cnss_pci_reg_read(pci_priv, + QCA6390_PCIE_SCRATCH_0_SOC_PCIE_REG, + &val)) + cnss_pr_err("Failed to read QCA6390_PCIE_SCRATCH_0_SOC_PCIE_REG"); + + if (val != plat_priv->qrtr_node_id) { + cnss_pr_err("qrtr node id write to register doesn't match with readout value"); + goto out; + } + } ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON); if (ret) goto out; @@ -3371,6 +3555,7 @@ static int cnss_pci_get_smmu_cfg(struct cnss_plat_data *plat_priv) } pci_priv->smmu_iova_ipa_start = res->start; + pci_priv->smmu_iova_ipa_current = res->start; pci_priv->smmu_iova_ipa_len = resource_size(res); cnss_pr_dbg("%s - smmu_iova_ipa_start: %pa, smmu_iova_ipa_len: %zu\n", (plat_priv->is_converged_dt ? 
@@ -3393,7 +3578,8 @@ static int cnss_pci_probe(struct pci_dev *pci_dev, { int ret = 0; struct cnss_pci_data *pci_priv; - struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL); + int rc_num = pci_dev->bus->domain_nr; + struct cnss_plat_data *plat_priv = cnss_get_plat_priv_by_rc_num(rc_num); cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x\n", id->vendor, pci_dev->device); @@ -3458,6 +3644,8 @@ static int cnss_pci_probe(struct pci_dev *pci_dev, switch (pci_dev->device) { case QCA6174_DEVICE_ID: + if (cnss_get_dual_wlan() && !plat_priv->enumerate_done) + break; pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET, &pci_priv->revision_id); ret = cnss_suspend_pci_link(pci_priv); @@ -3468,7 +3656,9 @@ static int cnss_pci_probe(struct pci_dev *pci_dev, break; case QCA6290_DEVICE_ID: case QCA6390_DEVICE_ID: - cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false); + if (cnss_get_dual_wlan() && plat_priv->enumerate_done) + cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, + false, false); case QCN7605_DEVICE_ID: setup_timer(&pci_priv->dev_rddm_timer, cnss_dev_rddm_timeout_hdlr, @@ -3493,6 +3683,9 @@ static int cnss_pci_probe(struct pci_dev *pci_dev, if (EMULATION_HW) break; + if (cnss_get_dual_wlan() && !plat_priv->enumerate_done) + break; + if (pci_dev->device != QCN7605_DEVICE_ID) cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, false); @@ -3577,7 +3770,7 @@ static const struct dev_pm_ops cnss_pm_ops = { cnss_pci_runtime_idle) }; -struct pci_driver cnss_pci_driver = { +static struct pci_driver cnss_pci_driver = { .name = "cnss_pci", .id_table = cnss_pci_id_table, .probe = cnss_pci_probe, @@ -3613,19 +3806,79 @@ int cnss_pci_init(struct cnss_plat_data *plat_priv) } } - ret = pci_register_driver(&cnss_pci_driver); - if (ret) { - cnss_pr_err("Failed to register to PCI framework, err = %d\n", - ret); - goto out; - } + /* in the dual wlan card case, if call pci_register_driver after + * finishing the first pcie device enumeration, it will cause + * the cnss_pci_probe called in advance with the second wlan card, + * and the sequence like this: + * enter msm_pcie_enumerate -> pci_bus_add_devices -> cnss_pci_probe + * -> exit msm_pcie_enumerate. + * But the correct sequence we expected is like this: + * enter msm_pcie_enumerate -> pci_bus_add_devices -> + * exit msm_pcie_enumerate -> cnss_pci_probe. + * And this unexpected sequence will make the second wlan card do + * pcie link suspend while the pcie enumeration not finished. + * So need to add below logical to avoid doing pcie link suspend + * if the enumeration has not finish. 
+ */ + if (cnss_get_dual_wlan()) { + plat_priv->enumerate_done = true; + /* Now enumeration is finished, try to suspend PCIe link */ + if (plat_priv->bus_priv) { + struct cnss_pci_data *pci_priv = plat_priv->bus_priv; + struct pci_dev *pci_dev = pci_priv->pci_dev; + + switch (pci_dev->device) { + case QCA6174_DEVICE_ID: + pci_read_config_word(pci_dev, + QCA6174_REV_ID_OFFSET, + &pci_priv->revision_id); + ret = cnss_suspend_pci_link(pci_priv); + if (ret) + cnss_pr_err("Failed to suspend PCI link, err = %d\n", + ret); + cnss_power_off_device(plat_priv); + break; + case QCA6290_DEVICE_ID: + case QCA6390_DEVICE_ID: + case QCN7605_DEVICE_ID: + if (pci_dev->device != QCN7605_DEVICE_ID) { + cnss_pci_set_wlaon_pwr_ctrl(pci_priv, + false, + false, + false); + cnss_pci_set_wlaon_pwr_ctrl(pci_priv, + false, + true, + false); + } + ret = cnss_suspend_pci_link(pci_priv); + if (ret) + cnss_pr_err("Failed to suspend PCI link, err = %d\n", + ret); + cnss_power_off_device(plat_priv); - if (!plat_priv->bus_priv) { - cnss_pr_err("Failed to probe pci driver\n"); - ret = -ENODEV; - goto deinit; + break; + default: + cnss_pr_err("Unknown PCI device found: 0x%x\n", + pci_dev->device); + ret = -ENODEV; + } + } + } + if (!cnss_driver_registered) { + ret = pci_register_driver(&cnss_pci_driver); + if (ret) { + cnss_pr_err("Failed to register to PCI framework, err = %d\n", + ret); + goto out; + } + if (!plat_priv->bus_priv) { + cnss_pr_err("Failed to probe pci driver\n"); + ret = -ENODEV; + goto deinit; + } + cnss_driver_registered = true; } - return 0; deinit: diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h index 779c8f9bcd9b77ad19ed1bd257af6f62d1084e06..b8c56d64839d7035c143c238b0c59b85067d45ac 100644 --- a/drivers/net/wireless/cnss2/pci.h +++ b/drivers/net/wireless/cnss2/pci.h @@ -76,6 +76,7 @@ struct cnss_pci_data { dma_addr_t smmu_iova_start; size_t smmu_iova_len; dma_addr_t smmu_iova_ipa_start; + dma_addr_t smmu_iova_ipa_current; size_t smmu_iova_ipa_len; void __iomem *bar; struct cnss_msi_config *msi_config; diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c index 2e12ae36c0480d09a17661b019c294bca0b5ee09..9a5075af70c53f17b6456a0d2475ff997b2f3291 100644 --- a/drivers/net/wireless/cnss2/qmi.c +++ b/drivers/net/wireless/cnss2/qmi.c @@ -1726,9 +1726,18 @@ int cnss_qmi_init(struct cnss_plat_data *plat_priv) cnss_pr_err("Failed to initialize QMI handle, err: %d\n", ret); goto out; } - - ret = qmi_add_lookup(&plat_priv->qmi_wlfw, WLFW_SERVICE_ID_V01, - WLFW_SERVICE_VERS_V01, WLFW_SERVICE_INS_ID_V01); + /* In order to support dual wlan card attach case, + * need separate qmi service instance id for each dev + */ + if (plat_priv->qrtr_node_id != 0 && + plat_priv->wlfw_service_instance_id != 0) + ret = qmi_add_lookup(&plat_priv->qmi_wlfw, WLFW_SERVICE_ID_V01, + WLFW_SERVICE_VERS_V01, + plat_priv->wlfw_service_instance_id); + else + ret = qmi_add_lookup(&plat_priv->qmi_wlfw, WLFW_SERVICE_ID_V01, + WLFW_SERVICE_VERS_V01, + WLFW_SERVICE_INS_ID_V01); if (ret < 0) cnss_pr_err("Failed to add QMI lookup, err: %d\n", ret); diff --git a/drivers/net/wireless/cnss2/qmi.h b/drivers/net/wireless/cnss2/qmi.h index 25874c8745b9222e5a9b786c7e078f3c250fd980..f5d031015ccd83c983e856c062081147633babf7 100644 --- a/drivers/net/wireless/cnss2/qmi.h +++ b/drivers/net/wireless/cnss2/qmi.h @@ -37,6 +37,7 @@ struct cnss_qmi_event_qdss_trace_save_data { char file_name[QDSS_TRACE_FILE_NAME_MAX + 1]; }; +#define FW_ID_BASE 7 #ifdef CONFIG_CNSS2_QMI #include "wlan_firmware_service_v01.h" 
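The qmi.c and qmi.h changes above give each attached WLAN card its own WLFW service instance for QMI lookup: when the device tree supplies a non-zero qrtr node id, cnss_qmi_init() registers the lookup with plat_priv->wlfw_service_instance_id instead of the single-chip default WLFW_SERVICE_INS_ID_V01. The standalone sketch below only mirrors that selection logic; the default value, the example node id and the FW_ID_BASE-based derivation in the comment are illustrative assumptions, not values taken from the driver.

#include <stdint.h>
#include <stdio.h>

#define FW_ID_BASE              7   /* as added to qmi.h above */
#define WLFW_SERVICE_INS_ID_V01 0   /* placeholder for the default instance id (assumption) */

/*
 * Hypothetical helper mirroring the lookup selection in cnss_qmi_init():
 * a device with a DT-provided (non-zero) qrtr node id and a non-zero
 * per-device service instance id uses that id; otherwise the legacy
 * single-chip default is used.
 */
static uint32_t pick_wlfw_instance_id(uint32_t qrtr_node_id,
				      uint32_t wlfw_service_instance_id)
{
	if (qrtr_node_id != 0 && wlfw_service_instance_id != 0)
		return wlfw_service_instance_id;

	return WLFW_SERVICE_INS_ID_V01;
}

int main(void)
{
	/* Single-chip case: no DT qrtr node id, default instance id. */
	printf("single chip: %u\n", pick_wlfw_instance_id(0, 0));

	/*
	 * Dual-Hastings case: a DT-provided node id (0x20 here is made up)
	 * paired with a per-device instance id; deriving it as
	 * FW_ID_BASE + node id is only one plausible scheme.
	 */
	printf("second card: %u\n", pick_wlfw_instance_id(0x20, FW_ID_BASE + 0x20));

	return 0;
}

Keeping the two lookups on distinct instance ids is what prevents the two cards' WLFW QMI services from colliding during discovery, for the same reason the earlier hunk writes a unique qrtr node id into the PCIe scratch register.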
diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c index 15661da6eedc88d1b5c6e02af6af5957e65ae9b9..39cfabf968d499b94f55ecbaccfde63a84a42108 100644 --- a/drivers/net/wireless/intersil/p54/p54usb.c +++ b/drivers/net/wireless/intersil/p54/p54usb.c @@ -64,6 +64,7 @@ static const struct usb_device_id p54u_table[] = { {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */ {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */ {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */ + {USB_DEVICE(0x124a, 0x4026)}, /* AirVasT USB wireless device */ {USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */ {USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */ {USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */ diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 5e8e34a08b2d6466bb3c2c42c9a11f779a63e17a..79c50aebffc4ba82fb619bac008ff4ae316cea27 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1451,7 +1451,8 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - static struct mwifiex_sta_node *node; + struct mwifiex_sta_node *node; + int i; if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && priv->media_connected && idx == 0) { @@ -1461,13 +1462,10 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, mwifiex_send_cmd(priv, HOST_CMD_APCMD_STA_LIST, HostCmd_ACT_GEN_GET, 0, NULL, true); - if (node && (&node->list == &priv->sta_list)) { - node = NULL; - return -ENOENT; - } - - node = list_prepare_entry(node, &priv->sta_list, list); - list_for_each_entry_continue(node, &priv->sta_list, list) { + i = 0; + list_for_each_entry(node, &priv->sta_list, list) { + if (i++ != idx) + continue; ether_addr_copy(mac, node->mac_addr); return mwifiex_dump_station_info(priv, node, sinfo); } diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 93eda23f0123918bda712fc800f7567c08aa83e4..7a050a75bdcbb13863de96f7abe6ef9c2f4e674c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -910,10 +910,8 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw, WARN_ON(NULL == skb); _urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!_urb) { - kfree_skb(skb); + if (!_urb) return NULL; - } _rtl_install_trx_info(rtlusb, skb, ep_num); usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev, ep_num), skb->data, skb->len, _rtl_tx_complete, skb); @@ -927,7 +925,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); u32 ep_num; struct urb *_urb = NULL; - struct sk_buff *_skb = NULL; WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl); if (unlikely(IS_USB_STOP(rtlusb))) { @@ -936,8 +933,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, return; } ep_num = rtlusb->ep_map.ep_mapping[qnum]; - _skb = skb; - _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num); + _urb = _rtl_usb_tx_urb_setup(hw, skb, ep_num); if (unlikely(!_urb)) { pr_err("Can't allocate urb. 
Drop skb!\n"); kfree_skb(skb); diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c index 3420c5104c9432c82f11ae0bf77e802bf166b107..3f81bfdece697a28ea73b81e3c0a32be1a0e5a70 100644 --- a/drivers/nfc/st21nfca/dep.c +++ b/drivers/nfc/st21nfca/dep.c @@ -184,8 +184,10 @@ static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev, memcpy(atr_res->gbi, atr_req->gbi, gb_len); r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi, gb_len); - if (r < 0) + if (r < 0) { + kfree_skb(skb); return r; + } } info->dep_info.curr_nfc_dep_pni = 0; diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c index 03b80d89b98003cd20a05823dd80610d26b23920..b75ec229b39a5f897dacb4337b95897fa4f83c59 100644 --- a/drivers/ntb/ntb.c +++ b/drivers/ntb/ntb.c @@ -216,10 +216,8 @@ int ntb_default_port_number(struct ntb_dev *ntb) case NTB_TOPO_B2B_DSD: return NTB_PORT_SEC_DSD; default: - break; + return 0; } - - return -EINVAL; } EXPORT_SYMBOL(ntb_default_port_number); @@ -242,10 +240,8 @@ int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx) case NTB_TOPO_B2B_DSD: return NTB_PORT_PRI_USD; default: - break; + return 0; } - - return -EINVAL; } EXPORT_SYMBOL(ntb_default_peer_port_number); diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 609f5f99e56eab2a154a81bb1981d8588c8dd6e1..d382e00f655e6add791b2ef48838c467d2fd1b18 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -400,9 +400,9 @@ static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub, arena->freelist[lane].sub = 1 - arena->freelist[lane].sub; if (++(arena->freelist[lane].seq) == 4) arena->freelist[lane].seq = 1; - if (ent_e_flag(ent->old_map)) + if (ent_e_flag(le32_to_cpu(ent->old_map))) arena->freelist[lane].has_err = 1; - arena->freelist[lane].block = le32_to_cpu(ent_lba(ent->old_map)); + arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map)); return ret; } @@ -541,9 +541,9 @@ static int arena_clear_freelist_error(struct arena_info *arena, u32 lane) static int btt_freelist_init(struct arena_info *arena) { - int old, new, ret; - u32 i, map_entry; - struct log_entry log_new, log_old; + int new, ret; + struct log_entry log_new; + u32 i, map_entry, log_oldmap, log_newmap; arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry), GFP_KERNEL); @@ -551,24 +551,26 @@ static int btt_freelist_init(struct arena_info *arena) return -ENOMEM; for (i = 0; i < arena->nfree; i++) { - old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT); - if (old < 0) - return old; - new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT); if (new < 0) return new; + /* old and new map entries with any flags stripped out */ + log_oldmap = ent_lba(le32_to_cpu(log_new.old_map)); + log_newmap = ent_lba(le32_to_cpu(log_new.new_map)); + /* sub points to the next one to be overwritten */ arena->freelist[i].sub = 1 - new; arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq)); - arena->freelist[i].block = le32_to_cpu(log_new.old_map); + arena->freelist[i].block = log_oldmap; /* * FIXME: if error clearing fails during init, we want to make * the BTT read-only */ - if (ent_e_flag(log_new.old_map)) { + if (ent_e_flag(le32_to_cpu(log_new.old_map)) && + !ent_normal(le32_to_cpu(log_new.old_map))) { + arena->freelist[i].has_err = 1; ret = arena_clear_freelist_error(arena, i); if (ret) dev_err_ratelimited(to_dev(arena), @@ -576,7 +578,7 @@ static int btt_freelist_init(struct arena_info *arena) } /* This implies a newly created or untouched flog entry */ - if (log_new.old_map == log_new.new_map) + if (log_oldmap == log_newmap) 
continue; /* Check if map recovery is needed */ @@ -584,8 +586,15 @@ static int btt_freelist_init(struct arena_info *arena) NULL, NULL, 0); if (ret) return ret; - if ((le32_to_cpu(log_new.new_map) != map_entry) && - (le32_to_cpu(log_new.old_map) == map_entry)) { + + /* + * The map_entry from btt_read_map is stripped of any flag bits, + * so use the stripped out versions from the log as well for + * testing whether recovery is needed. For restoration, use the + * 'raw' version of the log entries as that captured what we + * were going to write originally. + */ + if ((log_newmap != map_entry) && (log_oldmap == map_entry)) { /* * Last transaction wrote the flog, but wasn't able * to complete the map write. So fix up the map. diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h index 2609683c4167963f7fd8f03b27dee44c39ebf7b7..c3e6a5da2ec7287e2b12710c23c24048114c33c9 100644 --- a/drivers/nvdimm/btt.h +++ b/drivers/nvdimm/btt.h @@ -44,6 +44,8 @@ #define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK)) #define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK)) #define set_e_flag(ent) (ent |= MAP_ERR_MASK) +/* 'normal' is both e and z flags set */ +#define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent)) enum btt_init_state { INIT_UNCHECKED = 0, diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index e610dd890263b5f2b1fcccdfe30a8a66313c6ed5..76a74e292fd707b702aadb1d8f51b16dbe4d2963 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -159,11 +159,19 @@ static ssize_t size_show(struct device *dev, } static DEVICE_ATTR_RO(size); +static ssize_t log_zero_flags_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Y\n"); +} +static DEVICE_ATTR_RO(log_zero_flags); + static struct attribute *nd_btt_attributes[] = { &dev_attr_sector_size.attr, &dev_attr_namespace.attr, &dev_attr_uuid.attr, &dev_attr_size.attr, + &dev_attr_log_zero_flags.attr, NULL, }; diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index ace9958f29051bb67bd4e70e89971c09fb94be38..6ed3b4ed27dd3fd66e9e1cb3dc2bc5ae3b1c24bc 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1978,7 +1978,7 @@ struct device *create_namespace_pmem(struct nd_region *nd_region, nd_mapping = &nd_region->mapping[i]; label_ent = list_first_entry_or_null(&nd_mapping->labels, typeof(*label_ent), list); - label0 = label_ent ? label_ent->label : 0; + label0 = label_ent ? label_ent->label : NULL; if (!label0) { WARN_ON(1); @@ -2315,8 +2315,9 @@ static struct device **scan_labels(struct nd_region *nd_region) continue; /* skip labels that describe extents outside of the region */ - if (nd_label->dpa < nd_mapping->start || nd_label->dpa > map_end) - continue; + if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start || + __le64_to_cpu(nd_label->dpa) > map_end) + continue; i = add_namespace_resource(nd_region, nd_label, devs, count); if (i < 0) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index a760c449f4a90566db6025a363a9405a6f62ef80..2d95755092e30ee418e957f3cfd84f97e18e318b 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -758,6 +758,19 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); +/* + * In NVMe 1.0 the CNS field was just a binary controller or namespace + * flag, thus sending any new CNS opcodes has a big chance of not working. + * Qemu unfortunately had that bug after reporting a 1.1 version compliance + * (but not for any later version). 
+ */ +static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl) +{ + if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS) + return ctrl->vs < NVME_VS(1, 2, 0); + return ctrl->vs < NVME_VS(1, 1, 0); +} + static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) { struct nvme_command c = { }; @@ -2538,8 +2551,7 @@ static void nvme_scan_work(struct work_struct *work) return; nn = le32_to_cpu(id->nn); - if (ctrl->vs >= NVME_VS(1, 1, 0) && - !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { + if (!nvme_ctrl_limited_cns(ctrl)) { if (!nvme_scan_ns_list(ctrl, nn)) goto done; } diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 9fffe41ead5009c497d65ce16bcf67da31bd41ea..c91bfd839cabec10a6c2a308d6cdcaca664ef91f 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -470,7 +470,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) * Spread I/O queues completion vectors according their queue index. * Admin queues can always go on completion vector 0. */ - comp_vector = idx == 0 ? idx : idx - 1; + comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors; /* +1 for ib_stop_cq */ queue->ib_cq = ib_alloc_cq(ibdev, queue, diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c index 2bdb6c3893281c65d12a3916c037c07acba8480d..d3e08496e49bc11d67dc978818bcf87092ee3009 100644 --- a/drivers/nvmem/qfprom.c +++ b/drivers/nvmem/qfprom.c @@ -30,19 +30,6 @@ static int qfprom_reg_read(void *context, return 0; } -static int qfprom_reg_write(void *context, - unsigned int reg, void *_val, size_t bytes) -{ - void __iomem *base = context; - u8 *val = _val; - int i = 0, words = bytes; - - while (words--) - writeb(*val++, base + reg + i++); - - return 0; -} - static int qfprom_remove(struct platform_device *pdev) { struct nvmem_device *nvmem = platform_get_drvdata(pdev); @@ -56,7 +43,6 @@ static struct nvmem_config econfig = { .stride = 1, .word_size = 1, .reg_read = qfprom_reg_read, - .reg_write = qfprom_reg_write, }; static int qfprom_probe(struct platform_device *pdev) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 69da2f6896dae5261533ad11dcddfc75a002119d..8b7d3e64b8cab66faf8b500b5fcaf58801300de3 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -256,10 +256,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) child->name, addr); if (of_mdiobus_child_is_phy(child)) { + /* -ENODEV is the return code that PHYLIB has + * standardized on to indicate that bus + * scanning should continue. 
+ */ rc = of_mdiobus_register_phy(mdio, child, addr); - if (rc && rc != -ENODEV) + if (!rc) + break; + if (rc != -ENODEV) goto unregister; - break; } } } diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index 5f3048e75becb598b14013382ed102fa59a6f5b4..c1db09fbbe04167c548587b069cae8757a4350c5 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c @@ -365,10 +365,6 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) advk_pcie_wait_for_link(pcie); - reg = PCIE_CORE_LINK_L0S_ENTRY | - (1 << PCIE_CORE_LINK_WIDTH_SHIFT); - advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); - reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); reg |= PCIE_CORE_CMD_MEM_ACCESS_EN | PCIE_CORE_CMD_IO_ACCESS_EN | diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index febc6791a30519c9ded64aab70e90749cd30fa6c..c5d4dfcc86bd830d4defdf8cd5d4c139f4a3153f 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -7150,7 +7150,8 @@ int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user, { int ret = 0; struct pci_dev *dev; - u32 rc_idx = 0; + u32 rc_idx = 0, count = 0; + u16 device_id; struct msm_pcie_dev_t *pcie_dev; PCIE_GEN_DBG("PCIe: pm_opt:%d;busnr:%d;options:%d\n", @@ -7240,6 +7241,23 @@ int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user, "PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n", rc_idx, msm_pcie_dev[rc_idx].link_status, msm_pcie_dev[rc_idx].num_active_ep); + pci_read_config_word((struct pci_dev *)user, + PCI_DEVICE_ID, &device_id); + while (device_id != (((struct pci_dev *)user)->device) + && count < LINK_UP_CHECK_MAX_COUNT) { + usleep_range(LINK_UP_TIMEOUT_US_MIN, + LINK_UP_TIMEOUT_US_MAX); + pci_read_config_word((struct pci_dev *)user, + PCI_DEVICE_ID, &device_id); + PCIE_DBG(&msm_pcie_dev[rc_idx], + "PCIe: RC:%d, device_id_read:0x%x\n", + rc_idx, device_id); + count++; + } + if (count >= LINK_UP_CHECK_MAX_COUNT) + PCIE_ERR(&msm_pcie_dev[rc_idx], + "PCIe: RC:%d invalid device id\n", + rc_idx); break; } diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c index 2b0a1f3b8265fefce459a6c8238c9b26f3e317ad..0077afca24932dab7dcc6957cca817b40d5ac83a 100644 --- a/drivers/pci/host/pcie-rcar.c +++ b/drivers/pci/host/pcie-rcar.c @@ -328,11 +328,12 @@ static struct pci_ops rcar_pcie_ops = { }; static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, - struct resource *res) + struct resource_entry *window) { /* Setup PCIe address space mappings for each resource */ resource_size_t size; resource_size_t res_start; + struct resource *res = window->res; u32 mask; rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); @@ -346,9 +347,9 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); if (res->flags & IORESOURCE_IO) - res_start = pci_pio_to_address(res->start); + res_start = pci_pio_to_address(res->start) - window->offset; else - res_start = res->start; + res_start = res->start - window->offset; rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win)); rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F, @@ -377,7 +378,7 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci) switch (resource_type(res)) { case IORESOURCE_IO: case IORESOURCE_MEM: - rcar_pcie_setup_window(i, pci, res); + rcar_pcie_setup_window(i, pci, win); i++; break; case IORESOURCE_BUS: diff --git a/drivers/pci/host/vmd.c 
b/drivers/pci/host/vmd.c index af6d5da10ea5f89bdc47d5ddb463201569f75783..05f191ae0ff1b09c8b0f2c39e4e8dd5d86b1f637 100644 --- a/drivers/pci/host/vmd.c +++ b/drivers/pci/host/vmd.c @@ -638,9 +638,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd) vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, x86_vector_domain); - irq_domain_free_fwnode(fn); - if (!vmd->irq_domain) + if (!vmd->irq_domain) { + irq_domain_free_fwnode(fn); return -ENODEV; + } pci_add_resource(&resources, &vmd->resources[0]); pci_add_resource(&resources, &vmd->resources[1]); diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 9146f01ffeb251496b4a59771674dcacf5b53bc0..13bb1d5a4b49eae387d1b11f3a0138ab5c01113b 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -584,16 +584,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) /* Setup initial capable state. Will be updated later */ link->aspm_capable = link->aspm_support; - /* - * If the downstream component has pci bridge function, don't - * do ASPM for now. - */ - list_for_each_entry(child, &linkbus->devices, bus_list) { - if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) { - link->aspm_disable = ASPM_STATE_ALL; - break; - } - } /* Get and check endpoint acceptable latencies */ list_for_each_entry(child, &linkbus->devices, bus_list) { diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c index 3008bba360f354216e4aba113005ca8a43abd80c..ec6f6213960b4ecccbb195e039c60421b6204fba 100644 --- a/drivers/pci/pcie/ptm.c +++ b/drivers/pci/pcie/ptm.c @@ -47,10 +47,6 @@ void pci_ptm_init(struct pci_dev *dev) if (!pci_is_pcie(dev)) return; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); - if (!pos) - return; - /* * Enable PTM only on interior devices (root ports, switch ports, * etc.) on the assumption that it causes no link traffic until an @@ -60,6 +56,23 @@ void pci_ptm_init(struct pci_dev *dev) pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)) return; + /* + * Switch Downstream Ports are not permitted to have a PTM + * capability; their PTM behavior is controlled by the Upstream + * Port (PCIe r5.0, sec 7.9.16). + */ + ups = pci_upstream_bridge(dev); + if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM && + ups && ups->ptm_enabled) { + dev->ptm_granularity = ups->ptm_granularity; + dev->ptm_enabled = 1; + return; + } + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); + if (!pos) + return; + pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap); local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8; @@ -69,7 +82,6 @@ void pci_ptm_init(struct pci_dev *dev) * the spec recommendation (PCIe r3.1, sec 7.32.3), select the * furthest upstream Time Source as the PTM Root. 
*/ - ups = pci_upstream_bridge(dev); if (ups && ups->ptm_enabled) { ctrl = PCI_PTM_CTRL_ENABLE; if (ups->ptm_granularity == 0) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e23bfd9845b12f3808e88838bbb1bdec5c4dcc81..55ece07e584a07cb63df7a8453c64285c608262f 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -792,9 +792,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) goto free; err = device_register(&bridge->dev); - if (err) + if (err) { put_device(&bridge->dev); - + goto free; + } bus->bridge = get_device(&bridge->dev); device_enable_async_suspend(bus->bridge); pci_set_bus_of_node(bus); @@ -1446,7 +1447,7 @@ int pci_setup_device(struct pci_dev *dev) /* device class may be changed after fixup */ class = dev->class >> 8; - if (dev->non_compliant_bars) { + if (dev->non_compliant_bars && !dev->mmio_always_on) { pci_read_config_word(dev, PCI_COMMAND, &cmd); if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n"); @@ -1557,13 +1558,33 @@ static void pci_configure_mps(struct pci_dev *dev) struct pci_dev *bridge = pci_upstream_bridge(dev); int mps, p_mps, rc; - if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge)) + if (!pci_is_pcie(dev)) return; /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */ if (dev->is_virtfn) return; + /* + * For Root Complex Integrated Endpoints, program the maximum + * supported value unless limited by the PCIE_BUS_PEER2PEER case. + */ + if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) { + if (pcie_bus_config == PCIE_BUS_PEER2PEER) + mps = 128; + else + mps = 128 << dev->pcie_mpss; + rc = pcie_set_mps(dev, mps); + if (rc) { + pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", + mps); + } + return; + } + + if (!bridge || !pci_is_pcie(bridge)) + return; + mps = pcie_get_mps(dev); p_mps = pcie_get_mps(bridge); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 530646c4c5913ac4723b2fe01f5e19aa83aa57a2..f7e1aabd05da4ebe550a6128861cced70be81d9e 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -27,6 +27,7 @@ #include #include #include +#include #include /* isa_dma_bridge_buggy */ #include "pci.h" @@ -4235,6 +4236,24 @@ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, quirk_chelsio_T5_disable_root_port_attributes); +/* + * pci_acs_ctrl_enabled - compare desired ACS controls with those provided + * by a device + * @acs_ctrl_req: Bitmask of desired ACS controls + * @acs_ctrl_ena: Bitmask of ACS controls enabled or provided implicitly by + * the hardware design + * + * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included + * in @acs_ctrl_ena, i.e., the device provides all the access controls the + * caller desires. Return 0 otherwise. + */ +static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena) +{ + if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req) + return 1; + return 0; +} + /* * AMD has indicated that the devices below do not support peer-to-peer * in any system where they are found in the southbridge with an AMD @@ -4278,7 +4297,7 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) /* Filter out flags not applicable to multifunction */ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT); - return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 
0 : 1; + return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR); #else return -ENODEV; #endif @@ -4305,20 +4324,19 @@ static bool pci_quirk_cavium_acs_match(struct pci_dev *dev) static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) { + if (!pci_quirk_cavium_acs_match(dev)) + return -ENOTTY; + /* - * Cavium root ports don't advertise an ACS capability. However, + * Cavium Root Ports don't advertise an ACS capability. However, * the RTL internally implements similar protection as if ACS had - * Request Redirection, Completion Redirection, Source Validation, + * Source Validation, Request Redirection, Completion Redirection, * and Upstream Forwarding features enabled. Assert that the * hardware implements and enables equivalent ACS functionality for * these flags. */ - acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF); - - if (!pci_quirk_cavium_acs_match(dev)) - return -ENOTTY; - - return acs_flags ? 0 : 1; + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags) @@ -4328,13 +4346,12 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags) * transactions with others, allowing masking out these bits as if they * were unimplemented in the ACS capability. */ - acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); - - return acs_flags ? 0 : 1; + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } /* - * Many Intel PCH root ports do provide ACS-like features to disable peer + * Many Intel PCH Root Ports do provide ACS-like features to disable peer * transactions and validate bus numbers in requests, but do not provide an * actual PCIe ACS capability. This is the list of device IDs known to fall * into that category as provided by Intel in Red Hat bugzilla 1037684. @@ -4382,37 +4399,32 @@ static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) return false; } -#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV) - static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags) { - u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ? - INTEL_PCH_ACS_FLAGS : 0; - if (!pci_quirk_intel_pch_acs_match(dev)) return -ENOTTY; - return acs_flags & ~flags ? 0 : 1; + if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK) + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + + return pci_acs_ctrl_enabled(acs_flags, 0); } /* - * These QCOM root ports do provide ACS-like features to disable peer + * These QCOM Root Ports do provide ACS-like features to disable peer * transactions and validate bus numbers in requests, but do not provide an * actual PCIe ACS capability. Hardware supports source validation but it * will report the issue as Completer Abort instead of ACS Violation. - * Hardware doesn't support peer-to-peer and each root port is a root - * complex with unique segment numbers. It is not possible for one root - * port to pass traffic to another root port. All PCIe transactions are - * terminated inside the root port. + * Hardware doesn't support peer-to-peer and each Root Port is a Root + * Complex with unique segment numbers. It is not possible for one Root + * Port to pass traffic to another Root Port. All PCIe transactions are + * terminated inside the Root Port. 
*/ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags) { - u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV); - int ret = acs_flags & ~flags ? 0 : 1; - - dev_info(&dev->dev, "Using QCOM ACS Quirk (%d)\n", ret); - - return ret; + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } /* @@ -4495,7 +4507,7 @@ static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags) pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl); - return acs_flags & ~ctrl ? 0 : 1; + return pci_acs_ctrl_enabled(acs_flags, ctrl); } static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) @@ -4509,10 +4521,35 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) * perform peer-to-peer with other functions, allowing us to mask out * these bits as if they were unimplemented in the ACS capability. */ - acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | - PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | + PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); +} - return acs_flags ? 0 : 1; +static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags) +{ + /* + * Intel RCiEP's are required to allow p2p only on translated + * addresses. Refer to Intel VT-d specification, r3.1, sec 3.16, + * "Root-Complex Peer to Peer Considerations". + */ + if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END) + return -ENOTTY; + + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); +} + +static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) +{ + /* + * iProc PAXB Root Ports don't advertise an ACS capability, but + * they do not allow peer-to-peer transactions between Root Ports. + * Allow each Root Port to be in a separate IOMMU group by masking + * SV/RR/CR/UF bits. 
+ */ + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } static const struct pci_dev_acs_enabled { @@ -4585,6 +4622,7 @@ static const struct pci_dev_acs_enabled { /* I219 */ { PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs }, /* QCOM QDF2xxx root ports */ { 0x17cb, 0x400, pci_quirk_qcom_rp_acs }, { 0x17cb, 0x401, pci_quirk_qcom_rp_acs }, @@ -4598,9 +4636,30 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs }, /* APM X-Gene */ { PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs }, + /* Ampere Computing */ + { PCI_VENDOR_ID_AMPERE, 0xE005, pci_quirk_xgene_acs }, + { PCI_VENDOR_ID_AMPERE, 0xE006, pci_quirk_xgene_acs }, + { PCI_VENDOR_ID_AMPERE, 0xE007, pci_quirk_xgene_acs }, + { PCI_VENDOR_ID_AMPERE, 0xE008, pci_quirk_xgene_acs }, + { PCI_VENDOR_ID_AMPERE, 0xE009, pci_quirk_xgene_acs }, + { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs }, + { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs }, + { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs }, + { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs }, { 0 } }; +/* + * pci_dev_specific_acs_enabled - check whether device provides ACS controls + * @dev: PCI device + * @acs_flags: Bitmask of desired ACS controls + * + * Returns: + * -ENOTTY: No quirk applies to this device; we can't tell whether the + * device provides the desired controls + * 0: Device does not provide all the desired controls + * >0: Device provides all the controls in @acs_flags + */ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) { const struct pci_dev_acs_enabled *i; @@ -4870,13 +4929,25 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap); -/* FLR may cause some 82579 devices to hang. 
*/ -static void quirk_intel_no_flr(struct pci_dev *dev) +/* + * FLR may cause the following to devices to hang: + * + * AMD Starship/Matisse HD Audio Controller 0x1487 + * AMD Starship USB 3.0 Host Controller 0x148c + * AMD Matisse USB 3.0 Host Controller 0x149c + * Intel 82579LM Gigabit Ethernet Controller 0x1502 + * Intel 82579V Gigabit Ethernet Controller 0x1503 + * + */ +static void quirk_no_flr(struct pci_dev *dev) { dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET; } -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr); static void quirk_no_ext_tags(struct pci_dev *pdev) { @@ -4913,3 +4984,63 @@ static void quirk_no_ats(struct pci_dev *pdev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats); #endif /* CONFIG_PCI_ATS */ + +/* Freescale PCIe doesn't support MSI in RC mode */ +static void quirk_fsl_no_msi(struct pci_dev *pdev) +{ + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) + pdev->no_msi = 1; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi); + +/* + * Although not allowed by the spec, some multi-function devices have + * dependencies of one function (consumer) on another (supplier). For the + * consumer to work in D0, the supplier must also be in D0. Create a + * device link from the consumer to the supplier to enforce this + * dependency. Runtime PM is allowed by default on the consumer to prevent + * it from permanently keeping the supplier awake. + */ +static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer, + unsigned int supplier, unsigned int class, + unsigned int class_shift) +{ + struct pci_dev *supplier_pdev; + + if (PCI_FUNC(pdev->devfn) != consumer) + return; + + supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), + pdev->bus->number, + PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier)); + if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) { + pci_dev_put(supplier_pdev); + return; + } + + if (device_link_add(&pdev->dev, &supplier_pdev->dev, + DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME)) + pci_info(pdev, "D0 power state depends on %s\n", + pci_name(supplier_pdev)); + else + pci_err(pdev, "Cannot enforce power dependency on %s\n", + pci_name(supplier_pdev)); + + pm_runtime_allow(&pdev->dev); + pci_dev_put(supplier_pdev); +} + +/* + * Create device link for GPUs with integrated HDA controller for streaming + * audio to attached displays. 
+ */ +static void quirk_gpu_hda(struct pci_dev *hda) +{ + pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16); +} +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, + PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID, + PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, + PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index 46d60a3bf26085245358234fae3e452946ae4004..d6e47dee78b5f0ca68ddca92275b722c548dfa5f 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c @@ -549,13 +549,14 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work) struct sun4i_usb_phy_data *data = container_of(work, struct sun4i_usb_phy_data, detect.work); struct phy *phy0 = data->phys[0].phy; - struct sun4i_usb_phy *phy = phy_get_drvdata(phy0); + struct sun4i_usb_phy *phy; bool force_session_end, id_notify = false, vbus_notify = false; int id_det, vbus_det; - if (phy0 == NULL) + if (!phy0) return; + phy = phy_get_drvdata(phy0); id_det = sun4i_usb_phy0_get_id_det(data); vbus_det = sun4i_usb_phy0_get_vbus_det(data); diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index 17f2c5a505b25ee53a084c03d1d2c9afa5e82a44..ec0119e1e7810be87136491334e0c6997caf3f94 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -661,16 +661,6 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev, return 0; } -/* - * imx_free_resources() - free memory used by this driver - * @info: info driver instance - */ -static void imx_free_resources(struct imx_pinctrl *ipctl) -{ - if (ipctl->pctl) - pinctrl_unregister(ipctl->pctl); -} - int imx_pinctrl_probe(struct platform_device *pdev, struct imx_pinctrl_soc_info *info) { @@ -761,21 +751,16 @@ int imx_pinctrl_probe(struct platform_device *pdev, &ipctl->pctl); if (ret) { dev_err(&pdev->dev, "could not register IMX pinctrl driver\n"); - goto free; + return ret; } ret = imx_pinctrl_probe_dt(pdev, ipctl); if (ret) { dev_err(&pdev->dev, "fail to probe dt properties\n"); - goto free; + return ret; } dev_info(&pdev->dev, "initialized IMX pinctrl driver\n"); return pinctrl_enable(ipctl->pctl); - -free: - imx_free_resources(ipctl); - - return ret; } diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index e2cca91fd2669c2953d13743279c21a852ddefd6..68108c4c3969aeea16a53e87a5155e9b7d81bc2f 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c @@ -642,7 +642,6 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev, ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); if (ret) { - pinctrl_unregister(ipctl->pctl); dev_err(&pdev->dev, "Failed to populate subdevices\n"); return ret; } diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 4fb3e44f91331acab6e70dcd6d5817b796d64742..2ea4bb9ce6e168fffad347276a0f2c4a157b49ef 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -1503,6 +1503,7 @@ static const struct gpio_chip byt_gpio_chip = { .direction_output = byt_gpio_direction_output, .get = byt_gpio_get, .set = byt_gpio_set, + .set_config = gpiochip_generic_config, .dbg_show = byt_gpio_dbg_show, }; 
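The quirks.c hunk above adds pci_create_device_link() and registers quirk_gpu_hda() for ATI, AMD and NVIDIA HD-audio functions (class PCI_CLASS_MULTIMEDIA_HD_AUDIO, class_shift 8), linking the audio controller at function 1 to the GPU at function 0 of the same slot (class checked against PCI_BASE_CLASS_DISPLAY with class_shift 16). The standalone sketch below reproduces only that devfn and class-code arithmetic with plain integers; the slot, function and class values are made-up examples, and this is an illustration of the matching rules rather than the kernel implementation.

#include <stdint.h>
#include <stdio.h>

/* Open-coded equivalents of PCI_DEVFN()/PCI_SLOT()/PCI_FUNC(). */
#define DEVFN(slot, fn)	((((slot) & 0x1f) << 3) | ((fn) & 0x07))
#define SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define FUNC(devfn)	((devfn) & 0x07)

#define BASE_CLASS_DISPLAY	0x03	/* PCI_BASE_CLASS_DISPLAY */
#define CLASS_HD_AUDIO		0x0403	/* PCI_CLASS_MULTIMEDIA_HD_AUDIO */

int main(void)
{
	/* Hypothetical GPU at 01:00.0 (class 0x030000, VGA controller) with
	 * its HDA controller at 01:00.1 (class 0x040300). */
	unsigned int hda_devfn = DEVFN(0, 1);
	unsigned int hda_class = 0x040300;
	unsigned int gpu_class = 0x030000;

	/* The fixup was registered with class_shift 8, so it matches any
	 * prog-if of the HD-audio class... */
	if ((hda_class >> 8) != CLASS_HD_AUDIO)
		return 0;

	/* ...and pci_create_device_link(hda, 1, 0, ...) only acts on the
	 * consumer function number it was given (1 for the HDA function). */
	if (FUNC(hda_devfn) != 1)
		return 0;

	/* The supplier is function 0 in the same slot; class_shift 16 keeps
	 * only the base class, so any display sub-class (VGA, 3D, ...)
	 * qualifies as the GPU. */
	unsigned int supplier_devfn = DEVFN(SLOT(hda_devfn), 0);

	if ((gpu_class >> 16) == BASE_CLASS_DISPLAY)
		printf("function %u depends on function %u for D0\n",
		       FUNC(hda_devfn), FUNC(supplier_devfn));

	return 0;
}

As the hunk's comment notes, runtime PM stays allowed on the consumer (pm_runtime_allow() on the HDA function), so the link enforces the D0 dependency without keeping the GPU permanently awake.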
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 0907531a02caa26de3ed8a7918d8e3d48931c591..d39718b4242d9d09b7faf1e289fb8d70d74823f7 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1514,11 +1514,15 @@ static void chv_gpio_irq_handler(struct irq_desc *desc) struct chv_pinctrl *pctrl = gpiochip_get_data(gc); struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long pending; + unsigned long flags; u32 intr_line; chained_irq_enter(chip, desc); + raw_spin_lock_irqsave(&chv_lock, flags); pending = readl(pctrl->regs + CHV_INTSTAT); + raw_spin_unlock_irqrestore(&chv_lock, flags); + for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) { unsigned irq, offset; diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h index 8fa453a59da5e657a3cd7732171a639f6fd57d19..884f48f7a6a365fd265ee8537ff4c2456916b9aa 100644 --- a/drivers/pinctrl/pinctrl-amd.h +++ b/drivers/pinctrl/pinctrl-amd.h @@ -252,7 +252,7 @@ static const struct amd_pingroup kerncz_groups[] = { { .name = "uart0", .pins = uart0_pins, - .npins = 9, + .npins = 5, }, { .name = "uart1", diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index 1c534d823fd792ec276691d5bc7d23d9c233f743..6925a3d969e261092d1bb1bb95cd61a29d89b409 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -266,6 +266,7 @@ struct exynos_eint_gpio_save { u32 eint_con; u32 eint_fltcon0; u32 eint_fltcon1; + u32 eint_mask; }; /* @@ -561,10 +562,13 @@ static void exynos_pinctrl_suspend_bank( + 2 * bank->eint_offset); save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + 2 * bank->eint_offset + 4); + save->eint_mask = readl(regs + bank->irq_chip->eint_mask + + bank->eint_offset); pr_debug("%s: save con %#010x\n", bank->name, save->eint_con); pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0); pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1); + pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask); } void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) @@ -593,6 +597,9 @@ static void exynos_pinctrl_resume_bank( pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + 2 * bank->eint_offset + 4), save->eint_fltcon1); + pr_debug("%s: mask %#010x => %#010x\n", bank->name, + readl(regs + bank->irq_chip->eint_mask + + bank->eint_offset), save->eint_mask); writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET + bank->eint_offset); @@ -600,6 +607,8 @@ static void exynos_pinctrl_resume_bank( + 2 * bank->eint_offset); writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET + 2 * bank->eint_offset + 4); + writel(save->eint_mask, regs + bank->irq_chip->eint_mask + + bank->eint_offset); } void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c index 5cbb83a421c9716717b7374b1d03ea3ab1c9b510..9e5d51450e53d9ef04668d21530c8ed97a5d8595 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c @@ -3895,8 +3895,9 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in, * Dont enable ipa_status for APQ, since MDM IPA * has IPA >= 4.5 with DPLv3. 
*/ - if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ && - ipa3_is_mhip_offload_enabled()) + if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ && + ipa3_is_mhip_offload_enabled()) || + (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)) sys->ep->status.status_en = false; else sys->ep->status.status_en = true; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h index 7f88897506be5f6bbf80c1cbc435968b800c646c..d070ee8f2386aa85725e009914b905fc81dc2aab 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h @@ -20,7 +20,7 @@ #define IPA_PM_MAX_EX_CL 64 #define IPA_PM_THRESHOLD_MAX 5 #define IPA_PM_EXCEPTION_MAX 5 -#define IPA_PM_DEFERRED_TIMEOUT 10 +#define IPA_PM_DEFERRED_TIMEOUT 100 /* * ipa_pm group names diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index c88eb4f64c195f9cd1356edd424b362163a00aef..653e1e6941079bf6c09d7caa9a3f655e5765a2e9 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -1981,6 +1981,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, QMB_MASTER_SELECT_DDR, { 6, 2, 8, 16, IPA_EE_UC } }, + [IPA_4_1_APQ][IPA_CLIENT_WLAN2_PROD] = { + true, IPA_v4_0_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 6, 11, 8, 16, IPA_EE_AP } }, [IPA_4_1_APQ][IPA_CLIENT_USB_PROD] = { true, IPA_v4_0_GROUP_UL_DL, true, @@ -2005,12 +2011,6 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, QMB_MASTER_SELECT_DDR, { 9, 0, 8, 16, IPA_EE_UC } }, - [IPA_4_1_APQ][IPA_CLIENT_WLAN2_PROD] = { - true, IPA_v4_0_GROUP_UL_DL, - true, - IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, - QMB_MASTER_SELECT_DDR, - { 7, 9, 8, 16, IPA_EE_AP } }, /* Only for test purpose */ [IPA_4_1_APQ][IPA_CLIENT_TEST_PROD] = { true, IPA_v4_0_GROUP_UL_DL, @@ -2037,6 +2037,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, { 18, 3, 9, 9, IPA_EE_UC } }, + [IPA_4_1_APQ][IPA_CLIENT_WLAN2_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 18, 15, 8, 13, IPA_EE_AP } }, [IPA_4_1_APQ][IPA_CLIENT_USB_CONS] = { true, IPA_v4_0_GROUP_UL_DL, false, @@ -2067,12 +2073,6 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, { 22, 1, 9, 9, IPA_EE_UC } }, - [IPA_4_1_APQ][IPA_CLIENT_WLAN2_CONS] = { - true, IPA_v4_0_GROUP_UL_DL, - false, - IPA_DPS_HPS_SEQ_TYPE_INVALID, - QMB_MASTER_SELECT_DDR, - { 17, 1, 8, 13, IPA_EE_AP } }, /* Only for test purpose */ /* MBIM aggregation test pipes should have the same QMB as USB_CONS */ [IPA_4_1_APQ][IPA_CLIENT_TEST_CONS] = { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c index 864f47d1bbaf82875c89c22a2a30a6f853137ba5..22e3a7fea0cc917243df9f9bfdee30f4c9e53b6d 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c @@ -3751,6 +3751,12 @@ u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n) return -EINVAL; } + if (!ipahal_ctx) { + IPAHAL_ERR_RL("ipahal_ctx is NULL\n"); + WARN_ON_RATELIMIT_IPA(1); + return -EINVAL; + } + IPAHAL_DBG_LOW("get offset of %s m=%u n=%u\n", ipahal_reg_name_str(reg), m, n); 
offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset; diff --git a/drivers/platform/msm/veth_ipa/veth_emac_mgt.c b/drivers/platform/msm/veth_ipa/veth_emac_mgt.c index dba67482f7fe982c095cbe1eca66c645480da8c4..bf3fcfc39baa3bdfe2121aa01e7941ca36a8ef47 100644 --- a/drivers/platform/msm/veth_ipa/veth_emac_mgt.c +++ b/drivers/platform/msm/veth_ipa/veth_emac_mgt.c @@ -123,28 +123,18 @@ int veth_alloc_emac_export_mem( VETH_IPA_DEBUG("%s: physical addr: tx buf mem 0x%x\n", __func__, veth_emac_mem->tx_buf_mem_paddr); - veth_emac_mem->tx_buff_pool_base = - (uint32_t *)dma_zalloc_coherent(&pdata->pdev->dev, - sizeof(uint32_t) * VETH_TX_DESC_CNT, + /*transport minimum 4k*/ + veth_emac_mem->tx_buff_pool_base_va = + (uint32_t *)dma_alloc_coherent(&pdata->pdev->dev, + sizeof(uint32_t) * (VETH_TX_DESC_CNT * 4), &tx_buf_pool_paddr, - GFP_KERNEL); + GFP_KERNEL | GFP_DMA); - if (!veth_emac_mem->tx_buff_pool_base) { + if (!veth_emac_mem->tx_buff_pool_base_va) { VETH_IPA_DEBUG("%s: No memory for rx_buf_mem_va\n", __func__); goto free_tx_buff_pool_base; } - veth_emac_mem->tx_buff_pool_base[0] = veth_emac_mem->tx_buf_mem_paddr; - - for (i = 0; i < VETH_TX_DESC_CNT; i++) { - veth_emac_mem->tx_buff_pool_base[i] = - veth_emac_mem->tx_buff_pool_base[0] + - i*VETH_ETH_FRAME_LEN_IPA; - VETH_IPA_DEBUG( - "%s: veth_emac_mem->tx_buff_pool_base[%d] 0x%x\n", - __func__, i, veth_emac_mem->tx_buff_pool_base[i]); - } - veth_emac_mem->tx_buff_pool_base_pa = tx_buf_pool_paddr; //Allocate RX buffers @@ -166,28 +156,16 @@ int veth_alloc_emac_export_mem( VETH_IPA_DEBUG("%s: physical addr: rx_buf_mem_addr 0x%x\n", __func__, veth_emac_mem->rx_buf_mem_paddr); - veth_emac_mem->rx_buff_pool_base = + veth_emac_mem->rx_buff_pool_base_va = (uint32_t *)dma_zalloc_coherent(&pdata->pdev->dev, - sizeof(uint32_t) * VETH_RX_DESC_CNT, - &rx_buf_pool_paddr, - GFP_KERNEL); + sizeof(uint32_t) * VETH_RX_DESC_CNT*4, + &veth_emac_mem->rx_buff_pool_base_pa, + GFP_KERNEL | GFP_DMA); - if (!veth_emac_mem->rx_buff_pool_base) { + if (!veth_emac_mem->rx_buff_pool_base_va) { VETH_IPA_DEBUG("%s: No memory for rx_buf_mem_va\n", __func__); goto free_rx_buff_pool_base; } - - veth_emac_mem->rx_buff_pool_base[0] = veth_emac_mem->rx_buf_mem_paddr; - - for (i = 0; i < VETH_RX_DESC_CNT; i++) { - veth_emac_mem->rx_buff_pool_base[i] = - veth_emac_mem->rx_buff_pool_base[0] + - i*VETH_ETH_FRAME_LEN_IPA; - VETH_IPA_DEBUG( - "%s: veth_emac_mem->rx_buff_pool_base[%d] 0x%x\n", - __func__, i, veth_emac_mem->rx_buff_pool_base[i]); - } - veth_emac_mem->rx_buff_pool_base_pa = rx_buf_pool_paddr; return 0; @@ -196,7 +174,7 @@ int veth_alloc_emac_export_mem( free_rx_buff_pool_base: dma_free_coherent(&pdata->pdev->dev, VETH_ETH_FRAME_LEN_IPA * VETH_RX_DESC_CNT, - veth_emac_mem->rx_buff_pool_base, + veth_emac_mem->rx_buff_pool_base_va, rx_buf_pool_paddr); free_rx_buf_mem_va: dma_free_coherent(&pdata->pdev->dev, @@ -206,7 +184,7 @@ int veth_alloc_emac_export_mem( free_tx_buff_pool_base: dma_free_coherent(&pdata->pdev->dev, sizeof(uint32_t) * VETH_TX_DESC_CNT, - veth_emac_mem->tx_buff_pool_base, + veth_emac_mem->tx_buff_pool_base_va, tx_buf_pool_paddr); free_tx_buf_mem_va: dma_free_coherent(&pdata->pdev->dev, @@ -239,12 +217,18 @@ int veth_alloc_emac_export_mem( int veth_alloc_emac_dealloc_mem( struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata) { + /*1. Send stop offload to the BE + *2. Receive from BE + *4. Free the memory + *5. Close the HAB socket ? 
+ */ + if (veth_emac_mem->rx_buf_mem_va) { VETH_IPA_DEBUG("%s: Freeing RX buf mem", __func__); dma_free_coherent(&pdata->pdev->dev, - VETH_ETH_FRAME_LEN_IPA * VETH_RX_DESC_CNT, - veth_emac_mem->rx_buf_mem_va, - veth_emac_mem->rx_buf_mem_paddr); + VETH_ETH_FRAME_LEN_IPA * VETH_RX_DESC_CNT, + veth_emac_mem->rx_buf_mem_va, + veth_emac_mem->rx_buf_mem_paddr); } else { VETH_IPA_ERROR("%s: RX buf not available", __func__); } @@ -252,9 +236,9 @@ int veth_alloc_emac_dealloc_mem( if (veth_emac_mem->tx_buf_mem_va) { VETH_IPA_DEBUG("%s: Freeing TX buf mem", __func__); dma_free_coherent(&pdata->pdev->dev, - VETH_ETH_FRAME_LEN_IPA * VETH_TX_DESC_CNT, - veth_emac_mem->tx_buf_mem_va, - veth_emac_mem->tx_buf_mem_paddr); + VETH_ETH_FRAME_LEN_IPA * VETH_TX_DESC_CNT, + veth_emac_mem->tx_buf_mem_va, + veth_emac_mem->tx_buf_mem_paddr); } else { VETH_IPA_ERROR("%s: TX buf not available", __func__); } @@ -262,9 +246,9 @@ int veth_alloc_emac_dealloc_mem( if (veth_emac_mem->rx_desc_mem_va) { VETH_IPA_DEBUG("%s: Freeing RX desc mem", __func__); dma_free_coherent(&pdata->pdev->dev, - sizeof(struct s_TX_NORMAL_DESC) * VETH_TX_DESC_CNT, - veth_emac_mem->rx_desc_mem_va, - veth_emac_mem->rx_desc_mem_paddr); + sizeof(struct s_TX_NORMAL_DESC) * VETH_TX_DESC_CNT, + veth_emac_mem->rx_desc_mem_va, + veth_emac_mem->rx_desc_mem_paddr); } else { VETH_IPA_ERROR("%s: RX desc mem not available", __func__); } @@ -272,29 +256,29 @@ int veth_alloc_emac_dealloc_mem( if (veth_emac_mem->tx_desc_mem_va) { VETH_IPA_DEBUG("%s: Freeing TX desc mem", __func__); dma_free_coherent(&pdata->pdev->dev, - sizeof(struct s_RX_NORMAL_DESC) * VETH_RX_DESC_CNT, - veth_emac_mem->tx_desc_mem_va, - veth_emac_mem->tx_desc_mem_paddr); + sizeof(struct s_RX_NORMAL_DESC) * VETH_RX_DESC_CNT, + veth_emac_mem->tx_desc_mem_va, + veth_emac_mem->tx_desc_mem_paddr); } else { VETH_IPA_ERROR("%s: TX desc mem not available", __func__); } - if (veth_emac_mem->rx_buff_pool_base) { + if (veth_emac_mem->rx_buff_pool_base_va) { VETH_IPA_DEBUG("%s: Freeing RX buff pool mem", __func__); dma_free_coherent(&pdata->pdev->dev, - sizeof(uint32_t) * VETH_RX_DESC_CNT, - veth_emac_mem->rx_buff_pool_base, - veth_emac_mem->rx_buff_pool_base_pa); + sizeof(uint32_t) * VETH_RX_DESC_CNT, + veth_emac_mem->rx_buff_pool_base_va, + veth_emac_mem->rx_buff_pool_base_pa); } else { VETH_IPA_ERROR("%s: RX buff pool base not available", __func__); } - if (veth_emac_mem->tx_buff_pool_base) { + if (veth_emac_mem->tx_buff_pool_base_va) { VETH_IPA_DEBUG("%s: Freeing TX buff pool mem", __func__); dma_free_coherent(&pdata->pdev->dev, - sizeof(uint32_t) * VETH_TX_DESC_CNT, - veth_emac_mem->tx_buff_pool_base, - veth_emac_mem->tx_buff_pool_base_pa); + sizeof(uint32_t) * VETH_TX_DESC_CNT, + veth_emac_mem->tx_buff_pool_base_va, + veth_emac_mem->tx_buff_pool_base_pa); } else { VETH_IPA_ERROR("%s: TX buff pool base not available", __func__); } @@ -316,7 +300,7 @@ int veth_emac_ipa_hab_init(int mmid) int ret = 0; int vc_id = 0; char *pdata_send; - char *pdata_recv; + uint32_t *pdata_recv; uint32_t veth_hab_pdata_size = 32; VETH_IPA_INFO("%s: Enter HAB init\n", __func__); @@ -358,7 +342,7 @@ int veth_emac_ipa_hab_init(int mmid) /*Receive ACK*/ memset(pdata_recv, 1, veth_hab_pdata_size); VETH_IPA_INFO("%s: Receiving ACK\n", __func__); - ret = habmm_socket_recv(vc_id, pdata_recv, &veth_hab_pdata_size, 0, 0); + ret = habmm_socket_recv(vc_id, &pdata_recv, &veth_hab_pdata_size, 0, 0); if (ret) { VETH_IPA_ERROR("%s: receive failed! 
ret %d, recv size %d\n", @@ -543,6 +527,99 @@ static int veth_emac_ipa_hab_export_rx_buf( return ret; } + + +/** emac_ipa_hab_export_tx_buf_pool() - This API is called + * for exporting the TX buf pool memory to BE driver in QNX host + * @vcid: The virtual channel ID between BE and FE driver + * + * @veth_emac_mem - Contains the virtual and physical addresses + * of the exported memory + */ +int emac_ipa_hab_export_tx_buf_pool( + int vc_id, struct veth_emac_export_mem *veth_emac_mem, + struct veth_ipa_dev *pdata) +{ + int ret = 0; + + VETH_IPA_DEBUG("%s: Export TX buf pool memory TO VC_ID %d\n", + __func__, vc_id); + + ret = habmm_export( + vc_id, + veth_emac_mem->tx_buff_pool_base_va, + sizeof(uint32_t) * VETH_TX_DESC_CNT * 4, + &veth_emac_mem->exp_id.tx_buf_pool_exp_id, + 0); + + if (ret) { + VETH_IPA_ERROR("%s: Export failed %d returned, export id %d\n", + __func__, + ret, + veth_emac_mem->exp_id.tx_buf_pool_exp_id); + ret = -1; + goto err; + } + + pr_info("%s: Export TX buf pool memory location %p %d\n", + __func__, + veth_emac_mem->tx_buff_pool_base_va, + veth_emac_mem->exp_id.tx_buf_pool_exp_id); + return ret; + +err: + veth_alloc_emac_dealloc_mem(veth_emac_mem, pdata); + return ret; +} + + +/** emac_ipa_hab_export_tx_buf_pool() - This API is called + * for exporting the TX buf pool memory to BE driver in QNX host + * @vcid: The virtual channel ID between BE and FE driver + * + * @veth_emac_mem - Contains the virtual and physical addresses + * of the exported memory + */ +int emac_ipa_hab_export_rx_buf_pool( + int vc_id, struct veth_emac_export_mem *veth_emac_mem, + struct veth_ipa_dev *pdata) +{ + int ret = 0; + + VETH_IPA_DEBUG("%s: Export RX buf pool memory TO VC_ID %d\n", + __func__, vc_id); + + ret = habmm_export( + vc_id, + veth_emac_mem->rx_buff_pool_base_va, + sizeof(uint32_t) * (VETH_RX_DESC_CNT * 4), + &veth_emac_mem->exp_id.rx_buf_pool_exp_id, + 0); + + if (ret) { + VETH_IPA_ERROR("%s: Export failed %d returned, export id %d\n", + __func__, + ret, + veth_emac_mem->exp_id.rx_buf_pool_exp_id); + ret = -1 + ; + goto err; + } + + pr_info("%s: Export RX buf pool memory location %p , %d\n", + __func__, + veth_emac_mem->rx_buff_pool_base_va, + veth_emac_mem->exp_id.rx_buf_pool_exp_id); + return ret; + +err: + veth_alloc_emac_dealloc_mem(veth_emac_mem, pdata); + return ret; +} + + + + /** veth_emac_ipa_send_exp_id() - This API is used to send the * export IDs of all the exported memory to the BE driver in * QNX host @@ -551,19 +628,22 @@ static int veth_emac_ipa_hab_export_rx_buf( * @veth_emac_mem - Contains the virtual and physical addresses * of the exported memory */ -static int veth_emac_ipa_send_exp_id( +int veth_emac_ipa_send_exp_id( int vc_id, struct veth_emac_export_mem *veth_emac_mem) { int ret = 0; ret = habmm_socket_send(vc_id, - &veth_emac_mem->exp_id, - sizeof(veth_emac_mem->exp_id), - NO_FLAGS); - + &veth_emac_mem->exp_id, + sizeof(veth_emac_mem->exp_id), + NO_FLAGS); + VETH_IPA_INFO("Sent export ids to the backend driver"); + VETH_IPA_INFO("TX Descriptor export id sent %x", + veth_emac_mem->exp_id.tx_desc_exp_id); if (ret) { VETH_IPA_ERROR("%s: Send failed failed %d returned\n", - __func__, ret); + __func__, + ret); ret = -1; return ret; } @@ -572,7 +652,7 @@ static int veth_emac_ipa_send_exp_id( } int veth_emac_init(struct veth_emac_export_mem *veth_emac_mem, - struct veth_ipa_dev *pdata) + struct veth_ipa_dev *pdata, bool smmu_s2_enb) { int ret = 0; @@ -616,6 +696,26 @@ int veth_emac_init(struct veth_emac_export_mem *veth_emac_mem, return -ENOMEM; } + ret = 
emac_ipa_hab_export_tx_buf_pool(veth_emac_mem->vc_id, + veth_emac_mem, + pdata); + + if (ret < 0) { + VETH_IPA_ERROR( + "HAB export of TX buff pool mem failed, returning error"); + return -ENOMEM; + } + + ret = emac_ipa_hab_export_rx_buf_pool(veth_emac_mem->vc_id, + veth_emac_mem, + pdata); + + if (ret < 0) { + VETH_IPA_ERROR( + "HAB export of RX buff pool mem failed, returning error"); + return -ENOMEM; + } + ret = veth_emac_ipa_send_exp_id(veth_emac_mem->vc_id, veth_emac_mem); diff --git a/drivers/platform/msm/veth_ipa/veth_emac_mgt.h b/drivers/platform/msm/veth_ipa/veth_emac_mgt.h index 9d7098ed86865e556e3c9d2c3c84fbfe18382913..a30a03498815681e1ac289b81ff3226d5141dd38 100644 --- a/drivers/platform/msm/veth_ipa/veth_emac_mgt.h +++ b/drivers/platform/msm/veth_ipa/veth_emac_mgt.h @@ -20,7 +20,8 @@ int veth_emac_init(struct veth_emac_export_mem *veth_emac_mem, - struct veth_ipa_dev *pdata); + struct veth_ipa_dev *pdata, + bool smmu_s2_enb); int veth_alloc_emac_export_mem(struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata); @@ -40,6 +41,16 @@ int veth_emac_open_notify(struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata); int veth_emac_ipa_setup_complete(struct veth_emac_export_mem *veth_emac_mem, struct veth_ipa_dev *pdata); +int emac_ipa_hab_export_tx_buf_pool(int vc_id, + struct veth_emac_export_mem *veth_emac_mem, + struct veth_ipa_dev *pdata); +int emac_ipa_hab_export_rx_buf_pool(int vc_id, + struct veth_emac_export_mem *veth_emac_mem, + struct veth_ipa_dev *pdata); +int veth_emac_ipa_send_exp_id(int vc_id, + struct veth_emac_export_mem *veth_emac_mem); + + #endif /* _VETH_EMAC_MGT_H_ */ diff --git a/drivers/platform/msm/veth_ipa/veth_ipa.c b/drivers/platform/msm/veth_ipa/veth_ipa.c index 18bf44f8450eaa119c5e752a44ce5bb1929379f6..e7622ea577094c45396f87d9ea5429a862593c14 100644 --- a/drivers/platform/msm/veth_ipa/veth_ipa.c +++ b/drivers/platform/msm/veth_ipa/veth_ipa.c @@ -330,13 +330,15 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata, return -EINVAL; } + /*Configure SGT for UL ring base*/ ul->ring_base_sgt = kzalloc(sizeof(ul->ring_base_sgt), GFP_KERNEL); if (!ul->ring_base_sgt) return -ENOMEM; - ret = dma_get_sgtable(&pdata->pdev->dev, ul->ring_base_sgt, + ret = dma_get_sgtable(&pdata->pdev->dev, + ul->ring_base_sgt, veth_emac_mem->rx_desc_mem_va, - ul->ring_base_iova, + veth_emac_mem->rx_desc_mem_paddr, (sizeof(struct s_RX_NORMAL_DESC) * VETH_RX_DESC_CNT)); if (ret) { @@ -346,8 +348,16 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata, return -EAGAIN; } + /*get pa*/ ul->ring_base_pa = sg_phys(ul->ring_base_sgt->sgl); + VETH_IPA_INFO( + "%s:\n ul->ring_base_sgt = 0x%px , ul->ring_base_pa =0x%lx\n", + __func__, + ul->ring_base_sgt, + ul->ring_base_pa); + + /*configure SGT for UL buff pool base*/ ul->buff_pool_base_sgt = kzalloc( sizeof(ul->buff_pool_base_sgt), GFP_KERNEL); @@ -356,11 +366,14 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata, return -ENOMEM; } - ret = dma_get_sgtable(&pdata->pdev->dev, ul->buff_pool_base_sgt, - veth_emac_mem->rx_buf_mem_va, - ul->buff_pool_base_iova, - (sizeof(struct s_RX_NORMAL_DESC) * - VETH_RX_DESC_CNT)); + ret = dma_get_sgtable(&pdata->pdev->dev, + ul->buff_pool_base_sgt, + veth_emac_mem->rx_buff_pool_base_va, + veth_emac_mem->rx_buff_pool_base_pa, + (sizeof(uint32_t) * VETH_RX_DESC_CNT * 4) + ); + /*using ipa dev node for buff pool*/ + /*overallocating to satisfy hab page alignment*/ if (ret) { VETH_IPA_ERROR("Failed to get IPA UL buff pool sgtable.\n"); 
kfree(ul->ring_base_sgt); @@ -370,12 +383,21 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata, } ul->buff_pool_base_pa = sg_phys(ul->buff_pool_base_sgt->sgl); + veth_emac_mem->rx_buff_pool_base_pa = ul->buff_pool_base_pa; + + VETH_IPA_INFO( + "%s:\n ul->buff_pool_base_sgt = 0x%px,ul->buff_pool_base_pa =0x%lx\n", + __func__, + ul->buff_pool_base_sgt, + ul->buff_pool_base_pa); + /*Configure SGT for DL ring base*/ dl->ring_base_sgt = kzalloc(sizeof(dl->ring_base_sgt), GFP_KERNEL); if (!dl->ring_base_sgt) return -ENOMEM; - ret = dma_get_sgtable(&pdata->pdev->dev, dl->ring_base_sgt, + ret = dma_get_sgtable(&pdata->pdev->dev, + dl->ring_base_sgt, veth_emac_mem->tx_desc_mem_va, veth_emac_mem->tx_desc_mem_paddr, (sizeof(struct s_TX_NORMAL_DESC) * @@ -390,17 +412,24 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata, } dl->ring_base_pa = sg_phys(dl->ring_base_sgt->sgl); + VETH_IPA_INFO( + "%s:\n dl->ring_base_sgt = 0x%px , dl->ring_base_pa =0x%lx\n", + __func__, + dl->ring_base_sgt, + dl->ring_base_pa); + /*configure SGT for DL buff pool base*/ dl->buff_pool_base_sgt = kzalloc( sizeof(dl->buff_pool_base_sgt), GFP_KERNEL); if (!dl->buff_pool_base_sgt) return -ENOMEM; - ret = dma_get_sgtable(&pdata->pdev->dev, dl->buff_pool_base_sgt, - veth_emac_mem->tx_buf_mem_va, - veth_emac_mem->tx_buf_mem_paddr, - (sizeof(struct s_TX_NORMAL_DESC) * - VETH_TX_DESC_CNT)); + ret = dma_get_sgtable(&pdata->pdev->dev, + dl->buff_pool_base_sgt, + veth_emac_mem->tx_buff_pool_base_va, + veth_emac_mem->tx_buff_pool_base_pa, + (sizeof(uint32_t) * VETH_TX_DESC_CNT * 4) + ); if (ret) { VETH_IPA_ERROR("Failed to get IPA DL buff pool sgtable.\n"); kfree(ul->ring_base_sgt); @@ -412,6 +441,13 @@ int veth_set_ul_dl_smmu_ipa_params(struct veth_ipa_dev *pdata, } dl->buff_pool_base_pa = sg_phys(dl->buff_pool_base_sgt->sgl); + veth_emac_mem->tx_buff_pool_base_pa = dl->buff_pool_base_pa; + + VETH_IPA_INFO( + "%s:dl->buff_pool_base_sgt = 0x%px , dl->buff_pool_base_pa =0x%lx", + __func__, + dl->buff_pool_base_sgt, + dl->buff_pool_base_pa); return ret; } @@ -433,24 +469,45 @@ static int veth_map_rx_tx_setup_info_params( else rx_setup_info->smmu_enabled = false; - rx_setup_info->smmu_enabled = false; /* RX Descriptor Base Physical Address*/ if (!rx_setup_info->smmu_enabled) rx_setup_info->ring_base_pa = veth_emac_mem->rx_desc_mem_paddr; - rx_setup_info->ring_base_pa = veth_emac_mem->rx_desc_mem_paddr; - /* RX Descriptor Base Virtual Address*/ - rx_setup_info->ring_base_iova = veth_emac_mem->rx_desc_mem_paddr; + if (rx_setup_info->smmu_enabled) + rx_setup_info->ring_base_iova = veth_emac_mem->rx_desc_mem_iova; /* RX Descriptor Count*/ rx_setup_info->ntn_ring_size = VETH_RX_DESC_CNT; - rx_setup_info->buff_pool_base_pa = - veth_emac_mem->rx_buff_pool_base_pa; + + /* RX Buf pool base*/ + if (!rx_setup_info->smmu_enabled) { + rx_setup_info->buff_pool_base_pa = + veth_emac_mem->rx_buff_pool_base_pa; + } + +/*this may cause issues after smmu?*/ rx_setup_info->buff_pool_base_iova = - veth_emac_mem->rx_buff_pool_base_pa; + veth_emac_mem->rx_buff_pool_base_iova; - /* Assign IPA to pa*/ + /*Map TX Buff Pool*/ + if (emac_emb_smmu_ctx.valid) { + /*store rx buf mem iova into buff pool addresses*/ + veth_emac_mem->rx_buff_pool_base_va[0] = + veth_emac_mem->rx_buf_mem_iova; + } else { + /*store rx buf mem p addr into buff pool addresse*/ + veth_emac_mem->rx_buff_pool_base_va[0] = + veth_emac_mem->rx_buf_mem_paddr; + } + for (i = 0; i < VETH_RX_DESC_CNT; i++) { + veth_emac_mem->rx_buff_pool_base_va[i] = + 
veth_emac_mem->rx_buff_pool_base_va[0] + + i*VETH_ETH_FRAME_LEN_IPA; + VETH_IPA_DEBUG( + "%s: veth_emac_mem->rx_buff_pool_base[%d] 0x%x\n", + __func__, i, veth_emac_mem->rx_buff_pool_base_va[i]); + } /*RX buffer Count*/ rx_setup_info->num_buffers = VETH_RX_DESC_CNT - 1; @@ -464,20 +521,26 @@ static int veth_map_rx_tx_setup_info_params( else tx_setup_info->smmu_enabled = false; - tx_setup_info->smmu_enabled = false; - - tx_setup_info->ring_base_pa = veth_emac_mem->tx_desc_mem_paddr; + if (!tx_setup_info->smmu_enabled) + tx_setup_info->ring_base_pa = + veth_emac_mem->tx_desc_mem_paddr; /* TX Descriptor Base Virtual Address*/ - tx_setup_info->ring_base_iova = veth_emac_mem->tx_desc_mem_paddr; + if (tx_setup_info->smmu_enabled) + tx_setup_info->ring_base_iova = + veth_emac_mem->tx_desc_mem_iova; /* TX Descriptor Count*/ tx_setup_info->ntn_ring_size = VETH_TX_DESC_CNT; - tx_setup_info->buff_pool_base_pa = veth_emac_mem->tx_buff_pool_base_pa; + /* Tx Buf pool base*/ + if (!tx_setup_info->smmu_enabled) { + tx_setup_info->buff_pool_base_pa = + veth_emac_mem->tx_buff_pool_base_pa; + } tx_setup_info->buff_pool_base_iova = - veth_emac_mem->tx_buff_pool_base_pa; + veth_emac_mem->tx_buff_pool_base_iova; /* TX buffer Count*/ tx_setup_info->num_buffers = VETH_TX_DESC_CNT-1; @@ -485,76 +548,93 @@ static int veth_map_rx_tx_setup_info_params( /* TX Frame length */ tx_setup_info->data_buff_size = VETH_ETH_FRAME_LEN_IPA; - /* Allocate RX Buff List*/ - //Todo: Free this correctly - rx_setup_info->data_buff_list = kcalloc(rx_setup_info->num_buffers, - sizeof(struct ntn_buff_smmu_map), GFP_KERNEL); - if (rx_setup_info->data_buff_list == NULL) { - ret = -ENOMEM; - return ret; + /*Map TX Buff Pool*/ + if (emac_emb_smmu_ctx.valid) { + /*store tx buf iova addr in buff pool addresses*/ + /*store tx buf p addr in buff pool addresses*/ + veth_emac_mem->tx_buff_pool_base_va[0] = + veth_emac_mem->tx_buf_mem_iova; + } else { + veth_emac_mem->tx_buff_pool_base_va[0] = + veth_emac_mem->tx_buf_mem_paddr; + } + for (i = 0; i < VETH_TX_DESC_CNT; i++) { + veth_emac_mem->tx_buff_pool_base_va[i] = + veth_emac_mem->tx_buff_pool_base_va[0] + + i*VETH_ETH_FRAME_LEN_IPA; + VETH_IPA_DEBUG( + "%s: veth_emac_mem->tx_buff_pool_base[%d] 0x%x\n", + __func__, i, veth_emac_mem->tx_buff_pool_base_va[i]); } - /* Allocate TX Buff List*/ - //Todo: Free this correctly - tx_setup_info->data_buff_list = kcalloc(tx_setup_info->num_buffers, + + /* Allocate and Populate RX Buff List*/ + rx_setup_info->data_buff_list = kcalloc(rx_setup_info->num_buffers, sizeof(struct ntn_buff_smmu_map), GFP_KERNEL); - if (tx_setup_info->data_buff_list == NULL) { + if (rx_setup_info->data_buff_list == NULL) { ret = -ENOMEM; return ret; } - /*Populate RX Buff list. 
*/ - rx_setup_info->data_buff_list[0].iova = - veth_emac_mem->rx_buff_pool_base[0]; if (!rx_setup_info->smmu_enabled) { + /* this case we use p addr in rx_buff_pool_base[0]*/ rx_setup_info->data_buff_list[0].pa = - rx_setup_info->data_buff_list[0].iova; - //VETH_IPA_DEBUG - //("rx_setup_info->data_buff_list[0].pa = 0x%lx", - // rx_setup_info->data_buff_list[0].pa); + veth_emac_mem->rx_buf_mem_paddr; } else { rx_setup_info->data_buff_list[0].pa = veth_emac_mem->rx_buf_mem_paddr; - //VETH_IPA_DEBUG - //("rx_setup_info->data_buff_list[0].pa = 0x%lx", - // rx_setup_info->data_buff_list[0].pa); + + rx_setup_info->data_buff_list[0].iova = + veth_emac_mem->rx_buf_mem_iova; } + for (i = 0; i <= rx_setup_info->num_buffers; i++) { rx_setup_info->data_buff_list[i].iova = - veth_emac_mem->rx_buff_pool_base[i]; + rx_setup_info->data_buff_list[0].iova + + i*VETH_ETH_FRAME_LEN_IPA; rx_setup_info->data_buff_list[i].pa = - veth_emac_mem->rx_buff_pool_base[i]; - //VETH_IPA_DEBUG - //("rx_setup_info->data_buff_list[%d].pa = 0x%lx", - // i, rx_setup_info->data_buff_list[i].pa); + rx_setup_info->data_buff_list[0].pa + + i*VETH_ETH_FRAME_LEN_IPA; + // veth_emac_mem->rx_buf_mem_paddr[i]; + VETH_IPA_INFO("rx_setup_info->data_buff_list[%d].iova = 0x%lx", + i, rx_setup_info->data_buff_list[i].iova); + VETH_IPA_INFO("rx_setup_info->data_buff_list[%d].pa = 0x%lx", + i, rx_setup_info->data_buff_list[i].pa); } - /*Populate TX Buff list. */ - tx_setup_info->data_buff_list[0].iova = - veth_emac_mem->tx_buff_pool_base[0]; + + /* Allocate and Populate TX Buff List*/ + tx_setup_info->data_buff_list = kcalloc(tx_setup_info->num_buffers, + sizeof(struct ntn_buff_smmu_map), GFP_KERNEL); + if (tx_setup_info->data_buff_list == NULL) { + ret = -ENOMEM; + return ret; + } if (!tx_setup_info->smmu_enabled) { + /* this case we use p addr in rx_buff_pool_base[0]*/ tx_setup_info->data_buff_list[0].pa = - tx_setup_info->data_buff_list[0].iova; - //VETH_IPA_DEBUG - //("tx_setup_info->data_buff_list[0].pa = 0x%lx", - // tx_setup_info->data_buff_list[0].pa); + veth_emac_mem->tx_buf_mem_paddr; } else { tx_setup_info->data_buff_list[0].pa = veth_emac_mem->tx_buf_mem_paddr; - //VETH_IPA_INFO - //("tx_setup_info->data_buff_list[0].pa = 0x%lx", - //tx_setup_info->data_buff_list[0].pa); - } + tx_setup_info->data_buff_list[0].iova = + veth_emac_mem->tx_buf_mem_iova; + } for (i = 0; i <= tx_setup_info->num_buffers; i++) { tx_setup_info->data_buff_list[i].iova = - veth_emac_mem->tx_buff_pool_base[i]; + tx_setup_info->data_buff_list[0].iova + + i*VETH_ETH_FRAME_LEN_IPA; tx_setup_info->data_buff_list[i].pa = - veth_emac_mem->tx_buff_pool_base[i]; - //VETH_IPA_DEBUG( - //"tx_setup_info->data_buff_list[%d].pa = 0x%lx", - // i,tx_setup_info->data_buff_list[i].pa); + tx_setup_info->data_buff_list[0].pa + + i*VETH_ETH_FRAME_LEN_IPA; + VETH_IPA_INFO("tx_setup_info->data_buff_list[%d].iova = 0x%lx", + i, + tx_setup_info->data_buff_list[i].iova); + VETH_IPA_INFO("tx_setup_info->data_buff_list[%d].pa = 0x%lx", + i, + tx_setup_info->data_buff_list[i].pa); } return ret; @@ -572,10 +652,10 @@ int veth_ipa_offload_connect(struct veth_ipa_dev *pdata) int ret = 0; - /* Hard code SMMU Disable for PHASE 1*/ - emac_emb_smmu_ctx.valid = false; - - VETH_IPA_DEBUG("%s - begin\n", __func__); + /* Hard code SMMU Enable for PHASE 1*/ + emac_emb_smmu_ctx.valid = true; + VETH_IPA_DEBUG("%s - begin smmu_s2_enb=%d\n", __func__, + emac_emb_smmu_ctx.valid); if (!pdata) { VETH_IPA_ERROR("Null Param %s\n", __func__); @@ -947,7 +1027,9 @@ static void veth_ipa_offload_event_handler( 
VETH_IPA_DEBUG("%s - veth_emac_init\n", __func__); - ret = veth_emac_init(&(pdata->veth_emac_mem), pdata); + ret = veth_emac_init(&(pdata->veth_emac_mem), + pdata, + emac_emb_smmu_ctx.valid); if (ret) { pr_err("%s: veth_alloc_emac_export_mem failed error %d", __func__, @@ -1361,53 +1443,81 @@ static int veth_ipa_uc_ready(struct veth_ipa_dev *pdata) */ static int veth_ipa_emac_evt_mgmt(void *arg) { + /*Wait on HAV receive here*/ int ret = 0; int timeout_ms = 100; - int pdata_recv = 0; - int pdate_size = sizeof(pdata_recv); + struct emac_hab_mm_message pdata_recv; + //veth_emac_import_iova msg; + int pdata_size = sizeof(pdata_recv); struct veth_ipa_dev *pdata = (struct veth_ipa_dev *)arg; - + //memset(&msg, 0, sizeof(struct veth_emac_import_iova) ); VETH_IPA_INFO("%s: vc_id %d\n", __func__, pdata->veth_emac_mem.vc_id); while (1) { ret = habmm_socket_recv(pdata->veth_emac_mem.vc_id, &pdata_recv, - &pdate_size, + &pdata_size, timeout_ms, - HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING); - if (!ret) { - VETH_IPA_INFO("%s: pdata_recv %d\n", __func__, - pdata_recv); - switch (pdata_recv) { - case EV_IPA_EMAC_INIT: - if (!pdata->prv_ipa.emac_init) { - VETH_IPA_INFO("EMAC_INIT\n"); - veth_ipa_emac_init_done_cb(pdata); - pdata->prv_ipa.emac_init = true; - } - break; - case EV_IPA_EMAC_SETUP: - VETH_IPA_INFO("EMAC_SETUP event received\n"); - veth_ipa_emac_setup_done_cb(pdata); - break; - case EV_PHY_LINK_UP: - VETH_IPA_INFO("EMAC_PHY_LINK_UP event received\n"); - veth_ipa_emac_link_up_cb(pdata); - break; - case EV_START_OFFLOAD: - VETH_IPA_INFO("EV_START_OFFLOAD event received\n"); - veth_ipa_emac_start_offload_cb(pdata); - break; - case EV_EMAC_DEINIT: - VETH_IPA_INFO("EMAC_DEINIT event received\n"); - veth_ipa_emac_deinit_cb(pdata); - pdata->prv_ipa.emac_init = false; - break; - default: - VETH_IPA_ERROR("Unknown event received\n"); - break; - } + 0x0); + VETH_IPA_INFO("EVENT ID Received: %x", pdata_recv.event_id); + if (!ret) { + VETH_IPA_INFO("%s: msg->event_id %d\n", __func__, pdata_recv); + switch (pdata_recv.event_id) { + case EV_IPA_EMAC_INIT: + /* To avoid spurious events, possibly not required once state + * machine is available + */ + if (!pdata->prv_ipa.emac_init) { + VETH_IPA_INFO("EMAC_INIT event received\n"); + pr_info("%s: emac_init set to true ", __func__); + veth_ipa_emac_init_done_cb(pdata); + pdata->prv_ipa.emac_init = true; + } + break; + case EV_IPA_EMAC_SETUP: + /*use memcpy_s later instead*/ + pdata->veth_emac_mem.tx_desc_mem_iova = + (dma_addr_t) + pdata_recv.msg_type.iova.tx_desc_mem_iova; + pdata->veth_emac_mem.rx_desc_mem_iova = + (dma_addr_t) + pdata_recv.msg_type.iova.rx_desc_mem_iova; + pdata->veth_emac_mem.tx_buf_mem_iova = + (dma_addr_t) + pdata_recv.msg_type.iova.tx_buf_mem_iova; + pdata->veth_emac_mem.rx_buf_mem_iova = + (dma_addr_t) + pdata_recv.msg_type.iova.rx_buf_mem_iova; + pdata->veth_emac_mem.tx_buff_pool_base_iova = + (dma_addr_t) + pdata_recv.msg_type.iova.tx_buf_pool_base_iova; + pdata->veth_emac_mem.rx_buff_pool_base_iova = + (dma_addr_t) + pdata_recv.msg_type.iova.rx_buf_pool_base_iova; + VETH_IPA_INFO("EMAC_SETUP event received\n"); + VETH_IPA_INFO("union received: %x", + pdata->veth_emac_mem.tx_buff_pool_base_iova); + veth_ipa_emac_setup_done_cb(pdata); + break; + case EV_PHY_LINK_UP: + VETH_IPA_INFO("EMAC_PHY_LINK_UP event received\n"); + veth_ipa_emac_link_up_cb(pdata); + break; + case EV_START_OFFLOAD: + VETH_IPA_INFO("EV_START_OFFLOAD event received\n"); + veth_ipa_emac_start_offload_cb(pdata); + break; + case EV_EMAC_DEINIT: + VETH_IPA_INFO("EMAC_DEINIT 
event received\n"); + veth_ipa_emac_deinit_cb(pdata); + pdata->prv_ipa.emac_init = false; + break; + default: + VETH_IPA_ERROR("Unknown event received\n"); + break; } } +} + //kfree(msg); return 0; } /** @@ -1817,7 +1927,7 @@ static int veth_ipa_stop(struct net_device *net) VETH_IPA_DEBUG("network device stopped\n"); if (pdata->prv_ipa.ipa_uc_ready) { - pr_info("%s: veth_ipa_stop veth_disable_ipa_offload", + pr_info("%s: veth_disable_ipa_offload", __func__); veth_disable_ipa_offload(pdata); ipa_uc_offload_dereg_rdyCB(IPA_UC_NTN); @@ -1835,7 +1945,7 @@ static int veth_ipa_stop(struct net_device *net) //HAB call for BE driver in the mutex lock causes a deadlock ret = veth_emac_stop_offload(&(pdata->veth_emac_mem), pdata); if (ret < 0) { - pr_err("%s: veth_emac_stop_offload failed", __func__); + pr_err("%s: failed", __func__); return ret; } diff --git a/drivers/platform/msm/veth_ipa/veth_ipa.h b/drivers/platform/msm/veth_ipa/veth_ipa.h index 6718cd197a06ed7f137318e845209b06ac8cf8bb..a1d809ae717bd2fdc5b7814723bd9592c01fadce 100644 --- a/drivers/platform/msm/veth_ipa/veth_ipa.h +++ b/drivers/platform/msm/veth_ipa/veth_ipa.h @@ -119,7 +119,7 @@ static void *ipa_veth_logbuf; #define VETH_TX_DESC_CNT 256 /*la uses 128*/ /*IPA can support 2KB max pkt length*/ -#define VETH_ETH_FRAME_LEN_IPA (1<<11) +#define VETH_ETH_FRAME_LEN_IPA (1<<12) #define VETH_IPA_LOCK() mutex_lock(&pdata->prv_ipa.ipa_lock) #define VETH_IPA_UNLOCK() mutex_unlock(&pdata->prv_ipa.ipa_lock) @@ -154,11 +154,13 @@ struct s_TX_NORMAL_DESC { struct veth_emac_exp { - uint32_t tx_desc_exp_id; - uint32_t rx_desc_exp_id; - uint32_t tx_buff_exp_id; - uint32_t rx_buff_exp_id; - int event_id; + uint32_t tx_desc_exp_id; + uint32_t rx_desc_exp_id; + uint32_t tx_buff_exp_id; + uint32_t rx_buff_exp_id; + uint32_t rx_buf_pool_exp_id; + uint32_t tx_buf_pool_exp_id; + int event_id; }; struct veth_emac_export_mem { @@ -172,7 +174,7 @@ struct veth_emac_export_mem { dma_addr_t tx_buf_mem_paddr; dma_addr_t tx_buf_mem_iova; - uint32_t *tx_buff_pool_base; + uint32_t *tx_buff_pool_base_va; dma_addr_t tx_buff_pool_base_iova; dma_addr_t tx_buff_pool_base_pa; @@ -186,7 +188,7 @@ struct veth_emac_export_mem { dma_addr_t rx_buf_mem_paddr; dma_addr_t rx_buf_mem_iova; - uint32_t *rx_buff_pool_base; + uint32_t *rx_buff_pool_base_va; dma_addr_t rx_buff_pool_base_iova; dma_addr_t rx_buff_pool_base_pa; @@ -372,9 +374,9 @@ struct veth_ipa_dev { enum veth_ipa_state state; void (*device_ready_notify)(void); - #ifdef VETH_PM_ENB + #ifdef VETH_PM_ENB u32 pm_hdl; - #endif + #endif bool is_vlan_mode; /* Status of EMAC Device*/ @@ -405,6 +407,26 @@ struct emac_emb_smmu_cb_ctx { int ret; }; + +/* Maintain Order same on FE*/ +struct emac_ipa_iovas { + /*iova addresses*/ + void *tx_desc_mem_iova; + void *tx_buf_mem_iova; + void *tx_buf_pool_base_iova; + void *rx_desc_mem_iova; + void *rx_buf_mem_iova; + void *rx_buf_pool_base_iova; +}; + +struct emac_hab_mm_message { + int event_id; + union msg_type { + struct emac_ipa_iovas iova; + } msg_type; +}; + + #define GET_MEM_PDEV_DEV (emac_emb_smmu_ctx.valid ? 
\ &emac_emb_smmu_ctx.smmu_pdev->dev : ¶ms->pdev->dev) diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 59f3a37a44d7a68298ec81270b86c633244b9e85..8db2dc05b8cf29970abb3c99247cd953c73d21de 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -517,9 +517,33 @@ static struct asus_wmi_driver asus_nb_wmi_driver = { .detect_quirks = asus_nb_wmi_quirks, }; +static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = { + { + /* + * asus-nb-wm adds no functionality. The T100TA has a detachable + * USB kbd, so no hotkeys and it has no WMI rfkill; and loading + * asus-nb-wm causes the camera LED to turn and _stay_ on. + */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"), + }, + }, + { + /* The Asus T200TA has the same issue as the T100TA */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"), + }, + }, + {} /* Terminating entry */ +}; static int __init asus_nb_wmi_init(void) { + if (dmi_check_system(asus_nb_wmi_blacklist)) + return -ENODEV; + return asus_wmi_register_driver(&asus_nb_wmi_driver); } diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 06a3c1ef8eeee65455dc059b619b0580420eec5a..952544ca0d84dae176c4ca548f49d737f62d782a 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -474,8 +474,14 @@ static ssize_t postcode_show(struct device *dev, struct device_attribute *attr, static ssize_t als_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - u32 tmp = simple_strtoul(buf, NULL, 10); - int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp, + u32 tmp; + int ret; + + ret = kstrtou32(buf, 10, &tmp); + if (ret) + return ret; + + ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp, sizeof(tmp), sizeof(tmp)); if (ret) return ret < 0 ? 
ret : -EINVAL; diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c index c22033ad2b9e193f3dd6d21f1a4d5a456d4d8bb7..57f6eae635697103e74a62f50d884b1ca78cbe76 100644 --- a/drivers/power/reset/msm-poweroff.c +++ b/drivers/power/reset/msm-poweroff.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -292,8 +293,8 @@ static void msm_restart_prepare(const char *cmd) * Write download mode flags if restart_mode says so * Kill download mode if master-kill switch is set */ - - set_dload_mode(download_mode && + if (!is_kdump_kernel()) + set_dload_mode(download_mode && (in_panic || restart_mode == RESTART_DLOAD)); #endif @@ -694,8 +695,8 @@ static int msm_restart_probe(struct platform_device *pdev) if (scm_is_call_available(SCM_SVC_PWR, SCM_IO_DEASSERT_PS_HOLD) > 0) scm_deassert_ps_hold_supported = true; - - set_dload_mode(download_mode); + if (!is_kdump_kernel()) + set_dload_mode(download_mode); if (!download_mode) scm_disable_sdi(); diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c index e9e749f87517d871cf2d4113568a44fb9c0b5d09..8fb43c4438e609ba48ad78f432ce292751f4f525 100644 --- a/drivers/power/reset/vexpress-poweroff.c +++ b/drivers/power/reset/vexpress-poweroff.c @@ -150,6 +150,7 @@ static struct platform_driver vexpress_reset_driver = { .driver = { .name = "vexpress-reset", .of_match_table = vexpress_reset_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index e0a677bcc01f06297f07e2c866f02c4b146497ba..94e2c707f9a9895dde8bbd725ed9c939b3ed1b88 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -530,7 +530,7 @@ config CHARGER_BQ24257 tristate "TI BQ24250/24251/24257 battery charger driver" depends on I2C depends on GPIOLIB || COMPILE_TEST - depends on REGMAP_I2C + select REGMAP_I2C help Say Y to enable support for the TI BQ24250, BQ24251, and BQ24257 battery chargers. diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c index 0f3432795f3c26b5d4d57bb8e74721c46181aeb4..b8f7dac7ac3feb6d3530dfe9ebe4416e042e93f3 100644 --- a/drivers/power/supply/lp8788-charger.c +++ b/drivers/power/supply/lp8788-charger.c @@ -600,27 +600,14 @@ static void lp8788_setup_adc_channel(struct device *dev, return; /* ADC channel for battery voltage */ - chan = iio_channel_get(dev, pdata->adc_vbatt); + chan = devm_iio_channel_get(dev, pdata->adc_vbatt); pchg->chan[LP8788_VBATT] = IS_ERR(chan) ? NULL : chan; /* ADC channel for battery temperature */ - chan = iio_channel_get(dev, pdata->adc_batt_temp); + chan = devm_iio_channel_get(dev, pdata->adc_batt_temp); pchg->chan[LP8788_BATT_TEMP] = IS_ERR(chan) ? 
NULL : chan; } -static void lp8788_release_adc_channel(struct lp8788_charger *pchg) -{ - int i; - - for (i = 0; i < LP8788_NUM_CHG_ADC; i++) { - if (!pchg->chan[i]) - continue; - - iio_channel_release(pchg->chan[i]); - pchg->chan[i] = NULL; - } -} - static ssize_t lp8788_show_charger_status(struct device *dev, struct device_attribute *attr, char *buf) { @@ -747,7 +734,6 @@ static int lp8788_charger_remove(struct platform_device *pdev) lp8788_irq_unregister(pdev, pchg); sysfs_remove_group(&pdev->dev.kobj, &lp8788_attr_group); lp8788_psy_unregister(pchg); - lp8788_release_adc_channel(pchg); return 0; } diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c index 85a62678b79b0f3cf43dd57210f69729db26c2cf..892071fa67ccd8b77d544cc429f7cf88612c6bf9 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen4.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c @@ -1464,7 +1464,7 @@ static int fg_gen4_adjust_ki_coeff_full_soc(struct fg_gen4_chip *chip, int batt_temp) { struct fg_dev *fg = &chip->fg; - int rc, ki_coeff_full_soc_norm, ki_coeff_full_soc_low; + int rc, ki_coeff_full_soc_norm = 0, ki_coeff_full_soc_low = 0; u8 val; if ((batt_temp < 0) || diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c index 072c5189bd6d10acc40e2fbf22f7cba55b62eb73..0655dbdc7000d6aab02332d7b2ab5c69b57b1f54 100644 --- a/drivers/power/supply/smb347-charger.c +++ b/drivers/power/supply/smb347-charger.c @@ -1141,6 +1141,7 @@ static bool smb347_volatile_reg(struct device *dev, unsigned int reg) switch (reg) { case IRQSTAT_A: case IRQSTAT_C: + case IRQSTAT_D: case IRQSTAT_E: case IRQSTAT_F: case STAT_A: diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index ecd71efe8ea00a3bdf8a36596083b306420489d5..f15f6d1e1070ade42869c4ef4db18aef7ba8139f 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -904,6 +904,11 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, rmcd_error("pinned %ld out of %ld pages", pinned, nr_pages); ret = -EFAULT; + /* + * Set nr_pages up to mean "how many pages to unpin, in + * the error handler: + */ + nr_pages = pinned; goto err_pg; } diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 8f4fa1a52f0578c05f6bd3046a90982d65e20453..d6372470e5bea91c1ba9898dc3fd4edb35a50bd0 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1432,6 +1432,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, rproc->dev.type = &rproc_type; rproc->dev.class = &rproc_class; rproc->dev.driver_data = rproc; + idr_init(&rproc->notifyids); /* Assign a unique device index and name */ rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL); @@ -1450,8 +1451,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, mutex_init(&rproc->lock); - idr_init(&rproc->notifyids); - INIT_LIST_HEAD(&rproc->carveouts); INIT_LIST_HEAD(&rproc->mappings); INIT_LIST_HEAD(&rproc->traces); diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 2b051b54ae4b5f251e2ee62043dc148681e496cc..ade1553c6db12c6d267e37a4703a16704023e1e1 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -2053,6 +2053,10 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev, glink->irq = irq; + ret = enable_irq_wake(irq); + if (ret < 0) + dev_err(dev, "enable_irq_wake() failed on %d\n", irq); + size = 
of_property_count_u32_elems(dev->of_node, "cpu-affinity"); if (size > 0) { arr = kmalloc_array(size, sizeof(u32), GFP_KERNEL); diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index 32106e96ac6d5a9f18d5ae0617fcb10c04d444a9..c964c5a8993276430205306bf86cb4e3aceb48eb 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -1511,8 +1511,9 @@ static int qcom_smd_parse_edge(struct device *dev, } ret = devm_request_irq(dev, irq, - qcom_smd_edge_intr, IRQF_TRIGGER_RISING, - node->name, edge); + qcom_smd_edge_intr, IRQF_TRIGGER_RISING + | IRQF_NO_SUSPEND, node->name, edge); + if (ret) { dev_err(dev, "failed to request smd irq\n"); return ret; diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 29d6b5222f1cdb6420c9632f9825597bb524f295..0f8d13288611ec6a6ee9fc846166361a6564a919 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -377,7 +377,6 @@ static inline int multicast_outbound(struct qdio_q *q) extern u64 last_ai_time; /* prototypes for thin interrupt */ -void qdio_setup_thinint(struct qdio_irq *irq_ptr); int qdio_establish_thinint(struct qdio_irq *irq_ptr); void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); void tiqdio_add_input_queues(struct qdio_irq *irq_ptr); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index d0090c5c88e74c696f152a2cbc6dcb9d96deff98..a64615a10352b411c5adbf80c414dc3e6f2f1c08 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -479,7 +479,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data) setup_queues(irq_ptr, init_data); setup_qib(irq_ptr, init_data); - qdio_setup_thinint(irq_ptr); set_impl_params(irq_ptr, init_data->qib_param_field_format, init_data->qib_param_field, init_data->input_slib_elements, diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 831a3a0a2837bd3bca81cd47e00b0f9d90a2f5b2..4dc1108069d4ae3330c98b196c785de31421ff31 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -270,17 +270,19 @@ int __init tiqdio_register_thinints(void) int qdio_establish_thinint(struct qdio_irq *irq_ptr) { + int rc; + if (!is_thinint_irq(irq_ptr)) return 0; - return set_subchannel_ind(irq_ptr, 0); -} -void qdio_setup_thinint(struct qdio_irq *irq_ptr) -{ - if (!is_thinint_irq(irq_ptr)) - return; irq_ptr->dsci = get_indicator(); DBF_HEX(&irq_ptr->dsci, sizeof(void *)); + + rc = set_subchannel_ind(irq_ptr, 0); + if (rc) + put_indicator(irq_ptr->dsci); + + return rc; } void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 94d1bcc83fa2e8960f3ab6085bc1531a80c843d5..119238faf1ac4984900d192fde331e5add847158 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1594,6 +1594,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) { struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; + unsigned long req_id = 0; int retval = -EIO; spin_lock_irq(&qdio->req_q_lock); @@ -1616,6 +1617,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) hton24(req->qtcb->bottom.support.d_id, wka_port->d_id); req->data = wka_port; + req_id = req->req_id; + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); if (retval) @@ -1623,7 +1626,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) out: spin_unlock_irq(&qdio->req_q_lock); if (!retval) - zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id); + 
zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id); return retval; } @@ -1649,6 +1652,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) { struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; + unsigned long req_id = 0; int retval = -EIO; spin_lock_irq(&qdio->req_q_lock); @@ -1671,6 +1675,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) req->data = wka_port; req->qtcb->header.port_handle = wka_port->handle; + req_id = req->req_id; + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); if (retval) @@ -1678,7 +1684,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) out: spin_unlock_irq(&qdio->req_q_lock); if (!retval) - zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id); + zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id); return retval; } diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index 421fe869a11ef0cbdb8130aa7b8ebdffbc775236..ef9d907f2df5ce069a3f73f13f22b7d92a4dc703 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c @@ -2914,8 +2914,10 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id) ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); - if (!ashost->base || !ashost->fast) + if (!ashost->base || !ashost->fast) { + ret = -ENOMEM; goto out_put; + } host->irq = ec->irq; ashost->host = host; diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index b167411580bad9250e70dc2c887af47241b24c17..dd918c5d5cb8db1ffd5fa7d099f3b6b8002844d8 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -655,12 +655,13 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) struct hisi_hba *hisi_hba = sas_ha->lldd_ha; struct hisi_sas_phy *phy = sas_phy->lldd_phy; struct asd_sas_port *sas_port = sas_phy->port; - struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + struct hisi_sas_port *port; unsigned long flags; if (!sas_port) return; + port = to_hisi_sas_port(sas_port); spin_lock_irqsave(&hisi_hba->lock, flags); port->port_attached = 1; port->id = phy->port_id; diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 07c23bbd968c51f7b2d4624ca8b092f23caeeba6..aff868afe68d09d4f3ba2500364147f4ff49fc62 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -429,6 +429,8 @@ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue, int rc = 0; struct vio_dev *vdev = to_vio_dev(hostdata->dev); + set_adapter_info(hostdata); + /* Re-enable the CRQ */ do { if (rc) @@ -2299,16 +2301,12 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) static int ibmvscsi_remove(struct vio_dev *vdev) { struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); - unsigned long flags; srp_remove_host(hostdata->host); scsi_remove_host(hostdata->host); purge_requests(hostdata, DID_ERROR); - - spin_lock_irqsave(hostdata->host->host_lock, flags); release_event_pool(&hostdata->pool, hostdata); - spin_unlock_irqrestore(hostdata->host->host_lock, flags); ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events); diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c index d453667612f887c9949b7d4486e44ef6b5c751dd..15d64f96e623c298c9857ad0bce2cef5a78bb7ee 100644 --- a/drivers/scsi/iscsi_boot_sysfs.c +++ b/drivers/scsi/iscsi_boot_sysfs.c @@ -360,7 
+360,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset, boot_kobj->kobj.kset = boot_kset->kset; if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype, NULL, name, index)) { - kfree(boot_kobj); + kobject_put(&boot_kobj->kobj); return NULL; } boot_kobj->data = data; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 4c84c2ae1112dc955eddf4600615b1af56d05c6f..db1111f7e85aee464ba7727dc9fff1e03a190c0a 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -7913,6 +7913,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, spin_lock_irq(shost->host_lock); if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { spin_unlock_irq(shost->host_lock); + if (newnode) + lpfc_nlp_put(ndlp); goto dropit; } spin_unlock_irq(shost->host_lock); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 2c8e144d486148807355f4e75a5a359f032505cf..33a005c9bd65e7dc916b48e22a8a066d61d1654f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -3263,7 +3263,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc->scsi_lookup = NULL; } kfree(ioc->hpr_lookup); + ioc->hpr_lookup = NULL; kfree(ioc->internal_lookup); + ioc->internal_lookup = NULL; if (ioc->chain_lookup) { for (i = 0; i < ioc->chain_depth; i++) { if (ioc->chain_lookup[i].chain_buffer) diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 94f3829b1974a2042c500a78fdd78139ee97da29..fb6439bc1d9a9022a9c40136a8e5d6112c9d16e6 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -1007,7 +1007,8 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) if (qedi_ep->state == EP_STATE_OFLDCONN_START) goto ep_exit_recover; - flush_work(&qedi_ep->offload_work); + if (qedi_ep->state != EP_STATE_OFLDCONN_NONE) + flush_work(&qedi_ep->offload_work); if (qedi_ep->conn) { qedi_conn = qedi_ep->conn; @@ -1224,6 +1225,10 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) } iscsi_cid = (u32)path_data->handle; + if (iscsi_cid >= qedi->max_active_conns) { + ret = -EINVAL; + goto set_path_exit; + } qedi_ep = qedi->ep_tbl[iscsi_cid]; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 5e8ae510aef80f1d4e608131bbaad988c5400007..9d9737114dcf0c7c424bd1557c1506feadae3fb7 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2998,7 +2998,7 @@ qla24xx_abort_command(srb_t *sp) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, "Entered %s.\n", __func__); - if (vha->flags.qpairs_available && sp->qpair) + if (sp->qpair) req = sp->qpair->req; if (ql2xasynctmfenable) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index d4024015f859f1b6183d71c51fca502acc0386e7..ea60c6e603c060f59e5666b22a24330c2e3f787b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -5824,6 +5824,7 @@ qla2x00_do_dpc(void *data) if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags))) { + base_vha->flags.online = 1; ql_dbg(ql_dbg_dpc, base_vha, 0x4007, "ISP abort scheduled.\n"); if (ha->isp_ops->abort_isp(base_vha)) { diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index e08ac431bc496f10487631dfe6af1905ee390c90..e7aee067b0565fb6d0f99ec369d3df41beb2a103 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ 
b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -937,6 +937,7 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item, atomic_set(&tpg->lport_tpg_enabled, 0); qlt_stop_phase1(vha->vha_tgt.qla_tgt); + qlt_stop_phase2(vha->vha_tgt.qla_tgt); } return count; @@ -1101,6 +1102,7 @@ static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item, atomic_set(&tpg->lport_tpg_enabled, 0); qlt_stop_phase1(vha->vha_tgt.qla_tgt); + qlt_stop_phase2(vha->vha_tgt.qla_tgt); } return count; diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 022fcd2e47026de9430a0fb5d3d139d35851a21f..6748e82c6352ff0db4cccf33159533ee63891e57 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -392,8 +392,8 @@ EXPORT_SYMBOL(scsi_dev_info_list_add_keyed); /** * scsi_dev_info_list_find - find a matching dev_info list entry. - * @vendor: vendor string - * @model: model (product) string + * @vendor: full vendor string + * @model: full model (product) string * @key: specify list to use * * Description: @@ -408,7 +408,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, struct scsi_dev_info_list *devinfo; struct scsi_dev_info_list_table *devinfo_table = scsi_devinfo_lookup_by_key(key); - size_t vmax, mmax; + size_t vmax, mmax, mlen; const char *vskip, *mskip; if (IS_ERR(devinfo_table)) @@ -447,15 +447,19 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, dev_info_list) { if (devinfo->compatible) { /* - * Behave like the older version of get_device_flags. + * vendor strings must be an exact match */ - if (memcmp(devinfo->vendor, vskip, vmax) || - (vmax < sizeof(devinfo->vendor) && - devinfo->vendor[vmax])) + if (vmax != strnlen(devinfo->vendor, + sizeof(devinfo->vendor)) || + memcmp(devinfo->vendor, vskip, vmax)) continue; - if (memcmp(devinfo->model, mskip, mmax) || - (mmax < sizeof(devinfo->model) && - devinfo->model[mmax])) + + /* + * @model specifies the full string, and + * must be larger or equal to devinfo->model + */ + mlen = strnlen(devinfo->model, sizeof(devinfo->model)); + if (mmax < mlen || memcmp(devinfo->model, mskip, mlen)) continue; return devinfo; } else { diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index d0219e36080c3b79109ac405eb0cd726545585fc..e626fc2cc781316b09e6c1f82db1333e6052d7c4 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -349,7 +349,7 @@ store_spi_transport_##field(struct device *dev, \ struct spi_transport_attrs *tp \ = (struct spi_transport_attrs *)&starget->starget_data; \ \ - if (i->f->set_##field) \ + if (!i->f->set_##field) \ return -EINVAL; \ val = simple_strtoul(buf, NULL, 0); \ if (val > tp->max_##field) \ diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 8d6b13827c3906864c738f127acd75e370045984..633e4beaf7cb1f98769e6a9997e9eb3f9d45c5c4 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -695,8 +695,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) hp->flags = input_size; /* structure abuse ... 
*/ hp->pack_id = old_hdr.pack_id; hp->usr_ptr = NULL; - if (__copy_from_user(cmnd, buf, cmd_size)) + if (__copy_from_user(cmnd, buf, cmd_size)) { + sg_remove_request(sfp, srp); return -EFAULT; + } /* * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, * but is is possible that the app intended SG_DXFER_TO_DEV, because there diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index d0389b20574d0f778e2bfd95b07e80458970dbd5..5be3d6b7991b4a1854729edc5fdb2dcc8b6d7f75 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -748,7 +748,7 @@ static int sr_probe(struct device *dev) cd->cdi.disk = disk; if (register_cdrom(&cd->cdi)) - goto fail_put; + goto fail_minor; /* * Initialize block layer runtime PM stuffs before the @@ -766,6 +766,10 @@ static int sr_probe(struct device *dev) return 0; +fail_minor: + spin_lock(&sr_index_lock); + clear_bit(minor, sr_index_bits); + spin_unlock(&sr_index_lock); fail_put: put_disk(disk); fail_free: diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 4b76913104f7ba3c1e41f075d816c584ffaa5410..ac4975474c42f6b2074101a755144e6b4ca0d7ea 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -2650,15 +2650,15 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep) return; /* sleep a bit intermittently as we are dumping too much data */ - usleep_range(1000, 1100); + udelay(1000); ufs_qcom_testbus_read(hba); - usleep_range(1000, 1100); + udelay(1000); ufs_qcom_print_unipro_testbus(hba); - usleep_range(1000, 1100); + udelay(1000); ufs_qcom_print_utp_hci_testbus(hba); - usleep_range(1000, 1100); + udelay(1000); ufs_qcom_phy_dbg_register_dump(phy); - usleep_range(1000, 1100); + udelay(1000); } /** diff --git a/drivers/scsi/ufs/ufshcd-crypto-qti.c b/drivers/scsi/ufs/ufshcd-crypto-qti.c index cfae1e5dede23274d2b0338dace83f3f581255c2..4e23fb06c3852fb8b803b7a204cdcbd2e39decde 100644 --- a/drivers/scsi/ufs/ufshcd-crypto-qti.c +++ b/drivers/scsi/ufs/ufshcd-crypto-qti.c @@ -16,7 +16,10 @@ #include #include #include - +#if IS_ENABLED(CONFIG_CRYPTO_DEV_QCOM_ICE) +#include +#include +#endif #include "ufshcd-crypto-qti.h" #define MINIMUM_DUN_SIZE 512 @@ -30,6 +33,7 @@ static struct ufs_hba_crypto_variant_ops ufshcd_crypto_qti_variant_ops = { .disable = ufshcd_crypto_qti_disable, .resume = ufshcd_crypto_qti_resume, .debug = ufshcd_crypto_qti_debug, + .prepare_lrbp_crypto = ufshcd_crypto_qti_prep_lrbp_crypto, }; static uint8_t get_data_unit_size_mask(unsigned int data_unit_size) @@ -289,6 +293,65 @@ int ufshcd_crypto_qti_init_crypto(struct ufs_hba *hba, return err; } +int ufshcd_crypto_qti_prep_lrbp_crypto(struct ufs_hba *hba, + struct scsi_cmnd *cmd, + struct ufshcd_lrb *lrbp) +{ + struct bio_crypt_ctx *bc; + int ret = 0; +#if IS_ENABLED(CONFIG_CRYPTO_DEV_QCOM_ICE) + struct ice_data_setting setting; + bool bypass = true; + short key_index = 0; +#endif + struct request *req; + + lrbp->crypto_enable = false; + req = cmd->request; + if (!req || !req->bio) + return ret; + + if (!bio_crypt_should_process(req)) { +#if IS_ENABLED(CONFIG_CRYPTO_DEV_QCOM_ICE) + ret = qcom_ice_config_start(req, &setting); + if (!ret) { + key_index = setting.crypto_data.key_index; + bypass = (rq_data_dir(req) == WRITE) ? 
+ setting.encr_bypass : setting.decr_bypass; + lrbp->crypto_enable = !bypass; + lrbp->crypto_key_slot = key_index; + lrbp->data_unit_num = req->bio->bi_iter.bi_sector >> + ICE_CRYPTO_DATA_UNIT_4_KB; + } else { + pr_err("%s crypto config failed err = %d\n", __func__, + ret); + } +#endif + return ret; + } + bc = req->bio->bi_crypt_context; + + if (WARN_ON(!ufshcd_is_crypto_enabled(hba))) { + /* + * Upper layer asked us to do inline encryption + * but that isn't enabled, so we fail this request. + */ + return -EINVAL; + } + if (!ufshcd_keyslot_valid(hba, bc->bc_keyslot)) + return -EINVAL; + + lrbp->crypto_enable = true; + lrbp->crypto_key_slot = bc->bc_keyslot; + if (bc->is_ext4) { + lrbp->data_unit_num = (u64)cmd->request->bio->bi_iter.bi_sector; + lrbp->data_unit_num >>= 3; + } else { + lrbp->data_unit_num = bc->bc_dun[0]; + } + return 0; +} + int ufshcd_crypto_qti_debug(struct ufs_hba *hba) { return crypto_qti_debug(hba->crypto_vops->priv); diff --git a/drivers/scsi/ufs/ufshcd-crypto-qti.h b/drivers/scsi/ufs/ufshcd-crypto-qti.h index 1e75ce0a5c92661ad146542423217c75a3e914b1..261794ebf595ce8da8c5b4d997e8320ac29d3cc3 100644 --- a/drivers/scsi/ufs/ufshcd-crypto-qti.h +++ b/drivers/scsi/ufs/ufshcd-crypto-qti.h @@ -41,6 +41,9 @@ int ufshcd_crypto_qti_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op); int ufshcd_crypto_qti_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op); +int ufshcd_crypto_qti_prep_lrbp_crypto(struct ufs_hba *hba, + struct scsi_cmnd *cmd, + struct ufshcd_lrb *lrbp); #ifdef CONFIG_SCSI_UFS_CRYPTO_QTI void ufshcd_crypto_qti_set_vops(struct ufs_hba *hba); #else diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c index 240745526135807887e832584b21274ca130d42b..d6ef8a7699b2daa18a36147db59af69754a120f3 100644 --- a/drivers/scsi/ufs/ufshcd-crypto.c +++ b/drivers/scsi/ufs/ufshcd-crypto.c @@ -401,7 +401,6 @@ int ufshcd_prepare_lrbp_crypto_spec(struct ufs_hba *hba, lrbp->crypto_enable = true; lrbp->crypto_key_slot = bc->bc_keyslot; lrbp->data_unit_num = bc->bc_dun[0]; - return 0; } EXPORT_SYMBOL_GPL(ufshcd_prepare_lrbp_crypto_spec); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index abedcca0e79378bf976a0b2a2b6f4ddb5d952497..c121930dfa4f99e195ef0f9649cb792c9e7b3cfb 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3803,6 +3803,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) err = ufshcd_prepare_lrbp_crypto(hba, cmd, lrbp); if (err) { + ufshcd_release(hba, false); lrbp->cmd = NULL; clear_bit_unlock(tag, &hba->lrb_in_use); goto out; @@ -3825,6 +3826,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) err = ufshcd_map_sg(hba, lrbp); if (err) { + ufshcd_release(hba, false); lrbp->cmd = NULL; clear_bit_unlock(tag, &hba->lrb_in_use); ufshcd_release_all(hba); @@ -6765,7 +6767,6 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba, err = ufshcd_enable_auto_bkops(hba); else err = ufshcd_disable_auto_bkops(hba); - hba->urgent_bkops_lvl = curr_status; out: return err; } diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 7621eaf514044a60ddaa89846bb609bcc396ef0d..41e37bcefb05ac1474f00c1af322fbd44e701ef9 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -422,6 +422,7 @@ struct ufs_hba_crypto_variant_ops { struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp); void *priv; + void *crypto_DO_NOT_USE[8]; }; /* clock gating state */ @@ -1086,6 +1087,7 @@ struct ufs_hba { union ufs_crypto_cap_entry *crypto_cap_array; u32 
crypto_cfg_register; struct keyslot_manager *ksm; + void *crypto_DO_NOT_USE[8]; #endif /* CONFIG_SCSI_UFS_CRYPTO */ }; diff --git a/drivers/soc/qcom/bgcom_interface.c b/drivers/soc/qcom/bgcom_interface.c index a8ca6c05b76c52601404a2663900bbd018ba3a14..25cf9ce9ab8df8900bf76888495d017d7087609e 100644 --- a/drivers/soc/qcom/bgcom_interface.c +++ b/drivers/soc/qcom/bgcom_interface.c @@ -34,8 +34,15 @@ #include #include #include +#include +#include + +#include "peripheral-loader.h" +#include "../../misc/qseecom_kernel.h" +#include "pil_bg_intf.h" #define BGCOM "bg_com_dev" +#define SECURE_APP "bgapp" #define BGDAEMON_LDO09_LPM_VTG 0 #define BGDAEMON_LDO09_NPM_VTG 10000 @@ -73,6 +80,12 @@ struct bgdaemon_priv { struct workqueue_struct *bgdaemon_wq; struct work_struct bgdaemon_load_twm_bg_work; bool bg_twm_wear_load; + struct qseecom_handle *qseecom_handle; + int app_status; + unsigned long attrs; + u32 cmd_status; + struct device *platform_dev; + void *twm_data_buff; }; struct bg_event { @@ -364,6 +377,197 @@ static int adsp_down2_bg(void) return 0; } +/** + * load_bg_tzapp() - Called to load TZ app. + * + * Return: 0 on success. Error code on failure. + */ +static int load_bg_tzapp(void) +{ + int rc; + + /* return success if already loaded */ + if (dev->qseecom_handle && !dev->app_status) + return 0; + /* Load the APP */ + rc = qseecom_start_app(&dev->qseecom_handle, SECURE_APP, SZ_4K); + if (rc < 0) { + pr_err("BG TZ app load failure\n"); + dev->app_status = RESULT_FAILURE; + return -EIO; + } + dev->app_status = RESULT_SUCCESS; + return 0; +} + +/** + * get_cmd_rsp_buffers() - Function sets cmd & rsp buffer pointers and + * aligns buffer lengths + * @hdl: index of qseecom_handle + * @cmd: req buffer - set to qseecom_handle.sbuf + * @cmd_len: ptr to req buffer len + * @rsp: rsp buffer - set to qseecom_handle.sbuf + offset + * @rsp_len: ptr to rsp buffer len + * + * Return: Success always . + */ +static int get_cmd_rsp_buffers(struct qseecom_handle *handle, void **cmd, + uint32_t *cmd_len, void **rsp, uint32_t *rsp_len) +{ + *cmd = handle->sbuf; + if (*cmd_len & QSEECOM_ALIGN_MASK) + *cmd_len = QSEECOM_ALIGN(*cmd_len); + + *rsp = handle->sbuf + *cmd_len; + if (*rsp_len & QSEECOM_ALIGN_MASK) + *rsp_len = QSEECOM_ALIGN(*rsp_len); + + return 0; +} + +/** + * tzapp_comm() - Function called to communicate with TZ APP. + * @req: struct containing command and parameters. + * + * Return: 0 on success. Error code on failure. + */ +static long tzapp_comm(struct tzapp_bg_req *req) +{ + struct tzapp_bg_req *bg_tz_req; + struct tzapp_bg_rsp *bg_tz_rsp; + int rc, req_len, rsp_len; + + /* Fill command structure */ + req_len = sizeof(struct tzapp_bg_req); + rsp_len = sizeof(struct tzapp_bg_rsp); + rc = get_cmd_rsp_buffers(dev->qseecom_handle, + (void **)&bg_tz_req, &req_len, + (void **)&bg_tz_rsp, &rsp_len); + if (rc) + goto end; + + bg_tz_req->tzapp_bg_cmd = req->tzapp_bg_cmd; + bg_tz_req->address_fw = req->address_fw; + bg_tz_req->size_fw = req->size_fw; + rc = qseecom_send_command(dev->qseecom_handle, + (void *)bg_tz_req, req_len, (void *)bg_tz_rsp, rsp_len); + pr_debug("BGAPP qseecom returned with value 0x%x and status 0x%x\n", + rc, bg_tz_rsp->status); + if (rc || bg_tz_rsp->status) + dev->cmd_status = bg_tz_rsp->status; + else + dev->cmd_status = 0; + +end: + return rc; +} + +/** + * tzapp_twm_data() - Called to read user data saved by BG in TWM + * Allocate region from dynamic memory and pass this region to + * tz to dump data content from BG. + * + * Return: 0 on success. Error code on failure. 
+ */ +static int tzapp_twm_data(uint32_t num_words, void *read_buf) +{ + struct tzapp_bg_req bg_tz_req; + phys_addr_t start_addr; + void *region; + void *buf; + int ret = 0; + struct device dma_dev = {0}; + size_t data_len = num_words * 4; + + arch_setup_dma_ops(&dma_dev, 0, 0, NULL, 0); + + dev->attrs = 0; + dev->attrs |= DMA_ATTR_SKIP_ZEROING; + dev->attrs |= DMA_ATTR_STRONGLY_ORDERED; + + region = dma_alloc_attrs(dev->platform_dev, data_len, + &start_addr, GFP_KERNEL, dev->attrs); + + if (region == NULL) { + pr_debug( + "Failure to allocate twm data region of len %zx\n", + data_len); + return -ENOMEM; + } + + bg_tz_req.tzapp_bg_cmd = BGPIL_TWM_DATA; + bg_tz_req.address_fw = start_addr; + bg_tz_req.size_fw = data_len; + + ret = tzapp_comm(&bg_tz_req); + if (ret || dev->cmd_status) { + pr_debug("%s: BG twm data collection failed\n", + __func__); + ret = -EIO; + goto error; + } + + buf = dma_remap(dev->platform_dev, region, start_addr, + data_len, dev->attrs); + if (!buf) { + pr_debug("%s: Remap failed\n", __func__); + ret = -EFAULT; + goto error; + } + + memcpy(read_buf, buf, data_len); + ret = 0; + +error: + dma_free_attrs(dev->platform_dev, data_len, region, + start_addr, dev->attrs); + return ret; +} + +/** + * twm_data_fetch() - Called to fetch data saved by BG in TWM + * + * Return: 0 on success. Error code on failure. + */ +static int twm_data_fetch(struct bg_ui_data *fui_obj_msg) +{ + int ret; + + dev->twm_data_buff = kmalloc_array(fui_obj_msg->num_of_words, + sizeof(uint32_t), GFP_KERNEL); + + if (dev->twm_data_buff == NULL) + return -ENOMEM; + + ret = tzapp_twm_data(fui_obj_msg->num_of_words, dev->twm_data_buff); + return ret; +} + +/** + * twm_data_read() - Called to read user data saved by BG in TWM + * + * Return: 0 on success. Error code on failure. 
+ */ +static int twm_data_read(struct bg_ui_data *fui_obj_msg) +{ + int ret = 0; + void __user *result = (void *) + (uintptr_t)fui_obj_msg->buffer; + + if (dev->twm_data_buff == NULL) { + pr_err("Empty user buffer\n"); + return -EFAULT; + } + if (copy_to_user(result, dev->twm_data_buff, + fui_obj_msg->num_of_words * sizeof(uint32_t))) { + pr_err("copy to user failed\n"); + ret = -EFAULT; + } + kfree(dev->twm_data_buff); + dev->twm_data_buff = NULL; + return ret; +} + static long bg_com_ioctl(struct file *filp, unsigned int ui_bgcom_cmd, unsigned long arg) { @@ -443,6 +647,29 @@ static long bg_com_ioctl(struct file *filp, } ret = 0; break; + case BG_FETCH_TWM_DATA: + ret = load_bg_tzapp(); + if (ret) { + pr_err("%s: BG TZ app load failure\n", __func__); + } else { + pr_debug("bgapp loaded\n"); + if (copy_from_user(&ui_obj_msg, (void __user *) arg, + sizeof(ui_obj_msg))) { + pr_err("The copy from user failed\n"); + ret = -EFAULT; + } + ret = twm_data_fetch(&ui_obj_msg); + qseecom_shutdown_app(&dev->qseecom_handle); + } + break; + case BG_READ_TWM_DATA: + if (copy_from_user(&ui_obj_msg, (void __user *) arg, + sizeof(ui_obj_msg))) { + pr_err("The copy from user failed\n"); + ret = -EFAULT; + } + ret = twm_data_read(&ui_obj_msg); + break; default: ret = -ENOIOCTLCMD; break; @@ -505,6 +732,7 @@ static int bg_daemon_probe(struct platform_device *pdev) bgdaemon_configure_regulators(false); goto err_ret; } + dev->platform_dev = &pdev->dev; pr_info("%s success", __func__); err_device: @@ -617,6 +845,7 @@ static int ssr_bg_cb(struct notifier_block *this, case SUBSYS_AFTER_SHUTDOWN: if (dev->pending_bg_twm_wear_load) { bg_soft_reset(); + bgcom_bgdown_handler(); /* Load bg-twm */ dev->pending_bg_twm_wear_load = false; queue_work(dev->bgdaemon_wq, diff --git a/drivers/soc/qcom/bgcom_spi.c b/drivers/soc/qcom/bgcom_spi.c index f3ec8a421a184405911a73e0a200b8c68b987e16..afd9db90080c80831ed6ad026d04250a92136496 100644 --- a/drivers/soc/qcom/bgcom_spi.c +++ b/drivers/soc/qcom/bgcom_spi.c @@ -161,7 +161,7 @@ int bgcom_set_spi_state(enum bgcom_spi_state state) { struct bg_spi_priv *bg_spi = container_of(bg_com_drv, struct bg_spi_priv, lhandle); - struct device spi_dev = bg_spi->spi->master->dev; + const struct device spi_dev = bg_spi->spi->master->dev; ktime_t time_start, delta; s64 time_elapsed; @@ -177,7 +177,7 @@ int bgcom_set_spi_state(enum bgcom_spi_state state) while (!pm_runtime_status_suspended(spi_dev.parent)) { delta = ktime_sub(ktime_get(), time_start); time_elapsed = ktime_to_ms(delta); - BUG_ON(time_elapsed > 5 * MSEC_PER_SEC); + WARN_ON(time_elapsed > 5 * MSEC_PER_SEC); msleep(100); } } diff --git a/drivers/soc/qcom/boot_marker.c b/drivers/soc/qcom/boot_marker.c index 069862212eb3fbe3d276678a38531104861bcf32..37e2bf3a9b536ecef94dca21fb79b51eb86dfb70 100644 --- a/drivers/soc/qcom/boot_marker.c +++ b/drivers/soc/qcom/boot_marker.c @@ -115,6 +115,11 @@ EXPORT_SYMBOL(update_marker); static void set_bootloader_stats(bool hibernation_restore) { + if (IS_ERR_OR_NULL(boot_stats)) { + pr_err("boot_marker: imem not initialized!\n"); + return; + } + spin_lock(&boot_marker_list.slock); _create_boot_marker("M - APPSBL Start - ", readl_relaxed(&boot_stats->bootloader_start)); diff --git a/drivers/soc/qcom/crypto-qti-ice-regs.h b/drivers/soc/qcom/crypto-qti-ice-regs.h index d9e4cf2ad75fb6a7ba2c4ee86642940837b13f61..81c98c487427e81995f85a42ad3bab082d53e978 100644 --- a/drivers/soc/qcom/crypto-qti-ice-regs.h +++ b/drivers/soc/qcom/crypto-qti-ice-regs.h @@ -16,7 +16,11 @@ #include /* Register bits for ICE version 
*/ +#if IS_ENABLED(CONFIG_MMC_QTI_NONCMDQ_ICE) +#define ICE_CORE_CURRENT_MAJOR_VERSION 0x02 +#else #define ICE_CORE_CURRENT_MAJOR_VERSION 0x03 +#endif #define ICE_CORE_STEP_REV_MASK 0xFFFF #define ICE_CORE_STEP_REV 0 /* bit 15-0 */ diff --git a/drivers/soc/qcom/pil_bg_intf.h b/drivers/soc/qcom/pil_bg_intf.h index d0781d5f5f32104edccb3cd0cfa533bbfcb3bf2e..bf76a75e2ee74676ff4311142e7ca2211193911e 100644 --- a/drivers/soc/qcom/pil_bg_intf.h +++ b/drivers/soc/qcom/pil_bg_intf.h @@ -24,6 +24,7 @@ enum bg_tz_commands { BGPIL_AUTH_MDT, BGPIL_DLOAD_CONT, BGPIL_GET_BG_VERSION, + BGPIL_TWM_DATA, }; /* tzapp bg request.*/ diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c index 3ff4f8d94c0286bc55af1152f1f54a08104a181c..b5c47bbc7a72706843f9f1e9ddc2d990fba53997 100644 --- a/drivers/soc/qcom/qmi_rmnet.c +++ b/drivers/soc/qcom/qmi_rmnet.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "qmi_rmnet_i.h" #include #include @@ -891,16 +892,14 @@ void qmi_rmnet_burst_fc_check(struct net_device *dev, } EXPORT_SYMBOL(qmi_rmnet_burst_fc_check); -static bool qmi_rmnet_is_tcp_ack(struct sk_buff *skb) +static bool _qmi_rmnet_is_tcp_ack(struct sk_buff *skb) { - unsigned int len = skb->len; - switch (skb->protocol) { /* TCPv4 ACKs */ case htons(ETH_P_IP): if ((ip_hdr(skb)->protocol == IPPROTO_TCP) && - (ip_hdr(skb)->ihl == 5) && - (len == 40 || len == 52) && + (ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2) == + tcp_hdr(skb)->doff << 2) && ((tcp_flag_word(tcp_hdr(skb)) & cpu_to_be32(0x00FF0000)) == TCP_FLAG_ACK)) return true; @@ -909,7 +908,8 @@ static bool qmi_rmnet_is_tcp_ack(struct sk_buff *skb) /* TCPv6 ACKs */ case htons(ETH_P_IPV6): if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) && - (len == 60 || len == 72) && + (ntohs(ipv6_hdr(skb)->payload_len) == + (tcp_hdr(skb)->doff) << 2) && ((tcp_flag_word(tcp_hdr(skb)) & cpu_to_be32(0x00FF0000)) == TCP_FLAG_ACK)) return true; @@ -919,6 +919,19 @@ static bool qmi_rmnet_is_tcp_ack(struct sk_buff *skb) return false; } +static inline bool qmi_rmnet_is_tcp_ack(struct sk_buff *skb) +{ + /* Locally generated TCP acks */ + if (skb_is_tcp_pure_ack(skb)) + return true; + + /* Forwarded */ + if (unlikely(_qmi_rmnet_is_tcp_ack(skb))) + return true; + + return false; +} + static int qmi_rmnet_get_queue_sa(struct qos_info *qos, struct sk_buff *skb) { struct rmnet_flow_map *itm; diff --git a/drivers/soc/qcom/sdx_ext_ipc.c b/drivers/soc/qcom/sdx_ext_ipc.c index a4d0c35f935328e2beb01306501a9bd5789c0280..2691bf1bf07b2f71e532ab509efc5bbbb478d6ee 100644 --- a/drivers/soc/qcom/sdx_ext_ipc.c +++ b/drivers/soc/qcom/sdx_ext_ipc.c @@ -142,15 +142,13 @@ static int sideband_notify(struct notifier_block *nb, switch (action) { - case EVT_WAKE_UP: + case EVENT_REQUEST_WAKE_UP: gpio_set_value(mdm->gpios[WAKEUP_OUT], 1); usleep_range(10000, 20000); gpio_set_value(mdm->gpios[WAKEUP_OUT], 0); break; - default: - dev_info(mdm->dev, "Invalid action passed %d\n", - action); } + return NOTIFY_OK; } diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c index d97b129b7e661a5c8fb547c03c84f8932483c5c3..ad509458deee1a0dde982ce24317fe6b9989504f 100644 --- a/drivers/soc/qcom/subsystem_restart.c +++ b/drivers/soc/qcom/subsystem_restart.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -804,6 +804,7 @@ static int subsystem_powerup(struct subsys_device *dev, void *data) pr_info("[%s:%d]: Powering up %s\n", current->comm, current->pid, name); reinit_completion(&dev->err_ready); + enable_all_irqs(dev); ret = dev->desc->powerup(dev->desc); if (ret < 0) { notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE, @@ -819,7 +820,6 @@ static int subsystem_powerup(struct subsys_device *dev, void *data) pr_err("Powerup failure on %s\n", name); return ret; } - enable_all_irqs(dev); ret = wait_for_err_ready(dev); if (ret) { diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 0316fae20cfecc199ed0e4cd3af2359fc83c3c27..7c86a8ee03aa9650717753fcadc48006ec7a2cfe 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -683,7 +683,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) if (buf) buf[tp.byte] = read_rxram_slot_u8(qspi, slot); dev_dbg(&qspi->pdev->dev, "RD %02x\n", - buf ? buf[tp.byte] : 0xff); + buf ? buf[tp.byte] : 0x0); } else { u16 *buf = tp.trans->rx_buf; @@ -691,7 +691,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) buf[tp.byte / 2] = read_rxram_slot_u16(qspi, slot); dev_dbg(&qspi->pdev->dev, "RD %04x\n", - buf ? buf[tp.byte] : 0xffff); + buf ? buf[tp.byte / 2] : 0x0); } update_qspi_trans_byte_count(qspi, &tp, @@ -746,13 +746,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi) while (!tstatus && slot < MSPI_NUM_CDRAM) { if (tp.trans->bits_per_word <= 8) { const u8 *buf = tp.trans->tx_buf; - u8 val = buf ? buf[tp.byte] : 0xff; + u8 val = buf ? buf[tp.byte] : 0x00; write_txram_slot_u8(qspi, slot, val); dev_dbg(&qspi->pdev->dev, "WR %02x\n", val); } else { const u16 *buf = tp.trans->tx_buf; - u16 val = buf ? buf[tp.byte / 2] : 0xffff; + u16 val = buf ? 
buf[tp.byte / 2] : 0x0000; write_txram_slot_u16(qspi, slot, val); dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index eab27d41ba83f3ae713b0fccecc02aacb226834d..df6abc75bc1679a6220f1e016f50c9d343d08a7d 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -793,7 +793,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev) goto out_clk_disable; } - err = devm_spi_register_master(&pdev->dev, master); + err = spi_register_master(master); if (err) { dev_err(&pdev->dev, "could not register SPI master: %d\n", err); goto out_clk_disable; @@ -813,6 +813,8 @@ static int bcm2835_spi_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct bcm2835_spi *bs = spi_master_get_devdata(master); + spi_unregister_master(master); + /* Clear FIFOs, and disable the HW block */ bcm2835_wr(bs, BCM2835_SPI_CS, BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c index e075712c501e87abc98fe66a75527c446ffd6f0f..b7f78e6d9bec6f334ca38583a4da84036d4dbff2 100644 --- a/drivers/spi/spi-bcm2835aux.c +++ b/drivers/spi/spi-bcm2835aux.c @@ -485,7 +485,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) goto out_clk_disable; } - err = devm_spi_register_master(&pdev->dev, master); + err = spi_register_master(master); if (err) { dev_err(&pdev->dev, "could not register SPI master: %d\n", err); goto out_clk_disable; @@ -505,6 +505,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct bcm2835aux_spi *bs = spi_master_get_devdata(master); + spi_unregister_master(master); + bcm2835aux_spi_reset_hw(bs); /* disable the HW block by releasing the clock */ diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 837cb8d0bac6c9a1bd9f866192cd96950b13a68b..cb268cc4ba2b4b15f52adb9e4e5d9bb78cb07c4c 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -155,6 +155,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws, if (!xfer->tx_buf) return NULL; + memset(&txconf, 0, sizeof(txconf)); txconf.direction = DMA_MEM_TO_DEV; txconf.dst_addr = dws->dma_addr; txconf.dst_maxburst = 16; @@ -201,6 +202,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, if (!xfer->rx_buf) return NULL; + memset(&rxconf, 0, sizeof(rxconf)); rxconf.direction = DMA_DEV_TO_MEM; rxconf.src_addr = dws->dma_addr; rxconf.src_maxburst = 16; @@ -226,19 +228,23 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer) { - u16 dma_ctrl = 0; + u16 imr = 0, dma_ctrl = 0; dw_writel(dws, DW_SPI_DMARDLR, 0xf); dw_writel(dws, DW_SPI_DMATDLR, 0x10); - if (xfer->tx_buf) + if (xfer->tx_buf) { dma_ctrl |= SPI_DMA_TDMAE; - if (xfer->rx_buf) + imr |= SPI_INT_TXOI; + } + if (xfer->rx_buf) { dma_ctrl |= SPI_DMA_RDMAE; + imr |= SPI_INT_RXUI | SPI_INT_RXOI; + } dw_writel(dws, DW_SPI_DMACR, dma_ctrl); /* Set the interrupt mask */ - spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI); + spi_umask_intr(dws, imr); dws->transfer_handler = dma_transfer; @@ -268,7 +274,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) dma_async_issue_pending(dws->txchan); } - return 0; + return 1; } static void mid_spi_dma_stop(struct dw_spi *dws) diff --git a/drivers/spi/spi-dw.c 
b/drivers/spi/spi-dw.c index b461200871f8917b0be062a250459156bf18f6b0..d2428a8809c1c6a880573a32b643b3d1a5755bf7 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -305,6 +305,9 @@ static int dw_spi_transfer_one(struct spi_master *master, dws->len = transfer->len; spin_unlock_irqrestore(&dws->buf_lock, flags); + /* Ensure dw->rx and dw->rx_end are visible */ + smp_mb(); + spi_enable_chip(dws, 0); /* Handle per transfer options for bpw and speed */ @@ -381,11 +384,8 @@ static int dw_spi_transfer_one(struct spi_master *master, spi_enable_chip(dws, 1); - if (dws->dma_mapped) { - ret = dws->dma_ops->dma_transfer(dws, transfer); - if (ret < 0) - return ret; - } + if (dws->dma_mapped) + return dws->dma_ops->dma_transfer(dws, transfer); if (chip->poll_mode) return poll_transfer(dws); @@ -496,6 +496,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR); spin_lock_init(&dws->buf_lock); + spi_master_set_devdata(master, dws); + ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev), master); if (ret < 0) { @@ -526,11 +528,11 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) dws->dma_inited = 0; } else { master->can_dma = dws->dma_ops->can_dma; + master->flags |= SPI_CONTROLLER_MUST_TX; } } - spi_master_set_devdata(master, dws); - ret = devm_spi_register_master(dev, master); + ret = spi_register_master(master); if (ret) { dev_err(&master->dev, "problem registering spi master\n"); goto err_dma_exit; @@ -554,6 +556,8 @@ void dw_spi_remove_host(struct dw_spi *dws) { dw_spi_debugfs_remove(dws); + spi_unregister_master(dws->master); + if (dws->dma_ops && dws->dma_ops->dma_exit) dws->dma_ops->dma_exit(dws); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index ca013dd4ff6bb19d410ae522fe1f91b27d5f7ace..befabddf897a4b87c21f8fce6b65a96f7db26ae2 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -49,6 +49,9 @@ #define SPI_MCR_PCSIS (0x3F << 16) #define SPI_MCR_CLR_TXF (1 << 11) #define SPI_MCR_CLR_RXF (1 << 10) +#define SPI_MCR_DIS_TXF (1 << 13) +#define SPI_MCR_DIS_RXF (1 << 12) +#define SPI_MCR_HALT (1 << 0) #define SPI_TCR 0x08 #define SPI_TCR_GET_TCNT(x) (((x) & 0xffff0000) >> 16) @@ -73,7 +76,7 @@ #define SPI_SR 0x2c #define SPI_SR_EOQF 0x10000000 #define SPI_SR_TCFQF 0x80000000 -#define SPI_SR_CLEAR 0xdaad0000 +#define SPI_SR_CLEAR 0x9aaf0000 #define SPI_RSER_TFFFE BIT(25) #define SPI_RSER_TFFFD BIT(24) @@ -883,9 +886,11 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id) trans_mode); } } + + return IRQ_HANDLED; } - return IRQ_HANDLED; + return IRQ_NONE; } static const struct of_device_id fsl_dspi_dt_ids[] = { @@ -903,6 +908,8 @@ static int dspi_suspend(struct device *dev) struct spi_master *master = dev_get_drvdata(dev); struct fsl_dspi *dspi = spi_master_get_devdata(master); + if (dspi->irq) + disable_irq(dspi->irq); spi_master_suspend(master); clk_disable_unprepare(dspi->clk); @@ -923,6 +930,8 @@ static int dspi_resume(struct device *dev) if (ret) return ret; spi_master_resume(master); + if (dspi->irq) + enable_irq(dspi->irq); return 0; } @@ -1024,8 +1033,8 @@ static int dspi_probe(struct platform_device *pdev) goto out_clk_put; } - ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0, - pdev->name, dspi); + ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL, + IRQF_SHARED, pdev->name, dspi); if (ret < 0) { dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n"); goto out_clk_put; @@ -1035,7 +1044,7 @@ static int dspi_probe(struct 
platform_device *pdev) ret = dspi_request_dma(dspi, res->start); if (ret < 0) { dev_err(&pdev->dev, "can't get dma channels\n"); - goto out_clk_put; + goto out_free_irq; } } @@ -1048,11 +1057,14 @@ static int dspi_probe(struct platform_device *pdev) ret = spi_register_master(master); if (ret != 0) { dev_err(&pdev->dev, "Problem registering DSPI master\n"); - goto out_clk_put; + goto out_free_irq; } return ret; +out_free_irq: + if (dspi->irq) + free_irq(dspi->irq, dspi); out_clk_put: clk_disable_unprepare(dspi->clk); out_master_put: @@ -1067,13 +1079,29 @@ static int dspi_remove(struct platform_device *pdev) struct fsl_dspi *dspi = spi_master_get_devdata(master); /* Disconnect from the SPI framework */ + spi_unregister_controller(dspi->master); + + /* Disable RX and TX */ + regmap_update_bits(dspi->regmap, SPI_MCR, + SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF, + SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF); + + /* Stop Running */ + regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT); + dspi_release_dma(dspi); + if (dspi->irq) + free_irq(dspi->irq, dspi); clk_disable_unprepare(dspi->clk); - spi_unregister_master(dspi->master); return 0; } +static void dspi_shutdown(struct platform_device *pdev) +{ + dspi_remove(pdev); +} + static struct platform_driver fsl_dspi_driver = { .driver.name = DRIVER_NAME, .driver.of_match_table = fsl_dspi_dt_ids, @@ -1081,6 +1109,7 @@ static struct platform_driver fsl_dspi_driver = { .driver.pm = &dspi_pm, .probe = dspi_probe, .remove = dspi_remove, + .shutdown = dspi_shutdown, }; module_platform_driver(fsl_dspi_driver); diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index 3a4496b0d9f20bce5160952d792c6142082cbd6a..25ac91e53fbdfc45c8f806cee48a06ee9d91de5d 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -108,6 +108,16 @@ M_RX_FIFO_RD_ERR_EN | M_RX_FIFO_WR_ERR_EN | \ M_TX_FIFO_RD_ERR_EN | M_TX_FIFO_WR_ERR_EN) +/* SPI sampling registers */ +#define SE_GENI_CGC_CTRL (0x28) +#define SE_GENI_CFG_SEQ_START (0x84) +#define SE_GENI_CFG_REG108 (0x2B0) +#define SE_GENI_CFG_REG109 (0x2B4) +#define CPOL_CTRL_SHFT 1 +#define RX_IO_POS_FF_EN_SEL_SHFT 4 +#define RX_IO_EN2CORE_EN_DELAY_SHFT 8 +#define RX_SI_EN2IO_DELAY_SHFT 12 + struct gsi_desc_cb { struct spi_master *spi; struct spi_transfer *xfer; @@ -175,6 +185,8 @@ struct spi_geni_master { bool dis_autosuspend; bool cmd_done; struct spi_geni_ssr spi_ssr; + bool set_miso_sampling; + u32 miso_sampling_ctrl_val; }; static void spi_slv_setup(struct spi_geni_master *mas); @@ -862,6 +874,7 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi) int ret = 0, count = 0, proto; u32 max_speed = spi->cur_msg->spi->max_speed_hz; struct se_geni_rsc *rsc = &mas->spi_rsc; + u32 cpol, cpha, cfg_reg108, cfg_reg109, cfg_seq_start; mutex_lock(&mas->spi_ssr.ssr_lock); if (mas->spi_ssr.is_ssr_down) { @@ -1020,6 +1033,54 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi) "%s:Major:%d Minor:%d step:%dos%d\n", __func__, major, minor, step, mas->oversampling); } + + if (!mas->set_miso_sampling) + goto shared_se; + + cpol = geni_read_reg(mas->base, SE_SPI_CPOL); + cpha = geni_read_reg(mas->base, SE_SPI_CPHA); + cfg_reg108 = geni_read_reg(mas->base, SE_GENI_CFG_REG108); + cfg_reg109 = geni_read_reg(mas->base, SE_GENI_CFG_REG109); + /* clear CPOL bit */ + cfg_reg108 &= ~(1 << CPOL_CTRL_SHFT); + + if (major == 1 && minor == 0) { + /* Write 1 to RX_SI_EN2IO_DELAY reg */ + cfg_reg108 &= ~(0x7 << RX_SI_EN2IO_DELAY_SHFT); + cfg_reg108 |= (1 << RX_SI_EN2IO_DELAY_SHFT); + 
/* Write 0 to RX_IO_POS_FF_EN_SEL reg */ + cfg_reg108 &= ~(1 << RX_IO_POS_FF_EN_SEL_SHFT); + } else if ((major < 2) || (major == 2 && minor < 5)) { + /* Write 0 to RX_IO_EN2CORE_EN_DELAY reg */ + cfg_reg108 &= ~(0x7 << RX_IO_EN2CORE_EN_DELAY_SHFT); + } else { + /* + * Write miso_sampling_ctrl_set to + * RX_IO_EN2CORE_EN_DELAY reg + */ + cfg_reg108 &= ~(0x7 << RX_IO_EN2CORE_EN_DELAY_SHFT); + cfg_reg108 |= (mas->miso_sampling_ctrl_val << + RX_IO_EN2CORE_EN_DELAY_SHFT); + } + + geni_write_reg(cfg_reg108, mas->base, SE_GENI_CFG_REG108); + + if (cpol == 0 && cpha == 0) + cfg_reg109 = 1; + else if (cpol == 1 && cpha == 0) + cfg_reg109 = 0; + geni_write_reg(cfg_reg109, mas->base, + SE_GENI_CFG_REG109); + if (!(major == 1 && minor == 0)) + geni_write_reg(1, mas->base, SE_GENI_CFG_SEQ_START); + cfg_reg108 = geni_read_reg(mas->base, SE_GENI_CFG_REG108); + cfg_reg109 = geni_read_reg(mas->base, SE_GENI_CFG_REG109); + cfg_seq_start = geni_read_reg(mas->base, SE_GENI_CFG_SEQ_START); + + GENI_SE_DBG(mas->ipc, false, mas->dev, + "%s cfg108: 0x%x cfg109: 0x%x cfg_seq_start: 0x%x\n", + __func__, cfg_reg108, cfg_reg109, cfg_seq_start); +shared_se: if (mas->dis_autosuspend) GENI_SE_DBG(mas->ipc, false, mas->dev, "Auto Suspend is disabled\n"); @@ -1739,6 +1800,16 @@ static int spi_geni_probe(struct platform_device *pdev) geni_mas->dis_autosuspend = of_property_read_bool(pdev->dev.of_node, "qcom,disable-autosuspend"); + + geni_mas->set_miso_sampling = of_property_read_bool(pdev->dev.of_node, + "qcom,set-miso-sampling"); + if (geni_mas->set_miso_sampling) { + if (!of_property_read_u32(pdev->dev.of_node, + "qcom,miso-sampling-ctrl-val", + &geni_mas->miso_sampling_ctrl_val)) + dev_info(&pdev->dev, "MISO_SAMPLING_SET: %d\n", + geni_mas->miso_sampling_ctrl_val); + } geni_mas->phys_addr = res->start; geni_mas->size = resource_size(res); geni_mas->base = devm_ioremap(&pdev->dev, res->start, diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 0c2867deb36fce48c74b92388d210371ebd1a6d0..da28c52c9da199b47935f6414a8c891468cc09e4 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -41,7 +41,6 @@ #define SPI_CFG0_SCK_LOW_OFFSET 8 #define SPI_CFG0_CS_HOLD_OFFSET 16 #define SPI_CFG0_CS_SETUP_OFFSET 24 -#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET 16 #define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0 #define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16 @@ -53,6 +52,8 @@ #define SPI_CFG1_CS_IDLE_MASK 0xff #define SPI_CFG1_PACKET_LOOP_MASK 0xff00 #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000 +#define SPI_CFG2_SCK_HIGH_OFFSET 0 +#define SPI_CFG2_SCK_LOW_OFFSET 16 #define SPI_CMD_ACT BIT(0) #define SPI_CMD_RESUME BIT(1) @@ -259,7 +260,7 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable) static void mtk_spi_prepare_transfer(struct spi_master *master, struct spi_transfer *xfer) { - u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0; + u32 spi_clk_hz, div, sck_time, cs_time, reg_val; struct mtk_spi *mdata = spi_master_get_devdata(master); spi_clk_hz = clk_get_rate(mdata->spi_clk); @@ -272,18 +273,18 @@ static void mtk_spi_prepare_transfer(struct spi_master *master, cs_time = sck_time * 2; if (mdata->dev_comp->enhance_timing) { + reg_val = (((sck_time - 1) & 0xffff) + << SPI_CFG2_SCK_HIGH_OFFSET); reg_val |= (((sck_time - 1) & 0xffff) - << SPI_CFG0_SCK_HIGH_OFFSET); - reg_val |= (((sck_time - 1) & 0xffff) - << SPI_ADJUST_CFG0_SCK_LOW_OFFSET); + << SPI_CFG2_SCK_LOW_OFFSET); writel(reg_val, mdata->base + SPI_CFG2_REG); - reg_val |= (((cs_time - 1) & 0xffff) + reg_val = (((cs_time - 1) & 0xffff) << 
SPI_ADJUST_CFG0_CS_HOLD_OFFSET); reg_val |= (((cs_time - 1) & 0xffff) << SPI_ADJUST_CFG0_CS_SETUP_OFFSET); writel(reg_val, mdata->base + SPI_CFG0_REG); } else { - reg_val |= (((sck_time - 1) & 0xff) + reg_val = (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET); reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET); reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 5160e16d3a9852ffa7dd462ed66fb8c537bdcd63..1579eb2bc29f73e15674d70150b61d41193ae389 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -156,6 +156,7 @@ static const struct lpss_config lpss_platforms[] = { .tx_threshold_hi = 48, .cs_sel_shift = 8, .cs_sel_mask = 3 << 8, + .cs_clk_stays_gated = true, }, { /* LPSS_CNL_SSP */ .offset = 0x200, @@ -1826,7 +1827,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) /* Register with the SPI framework */ platform_set_drvdata(pdev, drv_data); - status = devm_spi_register_master(&pdev->dev, master); + status = spi_register_master(master); if (status != 0) { dev_err(&pdev->dev, "problem registering spi master\n"); goto out_error_clock_enabled; @@ -1856,6 +1857,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) pm_runtime_get_sync(&pdev->dev); + spi_unregister_master(drv_data->master); + /* Disable the SSP at the peripheral and SOC level */ pxa2xx_spi_write(drv_data, SSCR0, 0); clk_disable_unprepare(ssp->clk); diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index 8533f4edd00af64058ae7f547373cd5cc737cae4..21a22d42818c8a73b5985f847470c218b3189f09 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -202,7 +202,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, struct spi_transfer *tfr) { struct sun6i_spi *sspi = spi_master_get_devdata(master); - unsigned int mclk_rate, div, timeout; + unsigned int mclk_rate, div, div_cdr1, div_cdr2, timeout; unsigned int start, end, tx_time; unsigned int trig_level; unsigned int tx_len = 0; @@ -291,14 +291,12 @@ static int sun6i_spi_transfer_one(struct spi_master *master, * First try CDR2, and if we can't reach the expected * frequency, fall back to CDR1. 
*/ - div = mclk_rate / (2 * tfr->speed_hz); - if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) { - if (div > 0) - div--; - - reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS; + div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz); + div_cdr2 = DIV_ROUND_UP(div_cdr1, 2); + if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) { + reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS; } else { - div = ilog2(mclk_rate) - ilog2(tfr->speed_hz); + div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1)); reg = SUN6I_CLK_CTL_CDR1(div); } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 56035637d8f6c40efdc3a845c03b79f592a02eca..49eee894f51d4ed7178a0d70ea6acc88485190d4 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -2264,7 +2264,8 @@ void spi_unregister_controller(struct spi_controller *ctlr) { struct spi_controller *found; int id = ctlr->bus_num; - int dummy; + + device_for_each_child(&ctlr->dev, NULL, __unregister); /* First make sure that this controller was ever added */ mutex_lock(&board_lock); @@ -2278,7 +2279,6 @@ void spi_unregister_controller(struct spi_controller *ctlr) list_del(&ctlr->list); mutex_unlock(&board_lock); - dummy = device_for_each_child(&ctlr->dev, NULL, __unregister); device_unregister(&ctlr->dev); /* free bus id */ mutex_lock(&board_lock); diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index b8d91c2d6c898afa4392592a82d73a2bc96f3cb3..6044aacb7c79d847d372d165feec1e9b146e6d57 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -607,15 +607,20 @@ static int spidev_open(struct inode *inode, struct file *filp) static int spidev_release(struct inode *inode, struct file *filp) { struct spidev_data *spidev; + int dofree; mutex_lock(&device_list_lock); spidev = filp->private_data; filp->private_data = NULL; + spin_lock_irq(&spidev->spi_lock); + /* ... after we unbound from the underlying device? */ + dofree = (spidev->spi == NULL); + spin_unlock_irq(&spidev->spi_lock); + /* last close? */ spidev->users--; if (!spidev->users) { - int dofree; kfree(spidev->tx_buffer); spidev->tx_buffer = NULL; @@ -623,19 +628,14 @@ static int spidev_release(struct inode *inode, struct file *filp) kfree(spidev->rx_buffer); spidev->rx_buffer = NULL; - spin_lock_irq(&spidev->spi_lock); - if (spidev->spi) - spidev->speed_hz = spidev->spi->max_speed_hz; - - /* ... after we unbound from the underlying device? */ - dofree = (spidev->spi == NULL); - spin_unlock_irq(&spidev->spi_lock); - if (dofree) kfree(spidev); + else + spidev->speed_hz = spidev->spi->max_speed_hz; } #ifdef CONFIG_SPI_SLAVE - spi_slave_abort(spidev->spi); + if (!dofree) + spi_slave_abort(spidev->spi); #endif mutex_unlock(&device_list_lock); @@ -783,13 +783,13 @@ static int spidev_remove(struct spi_device *spi) { struct spidev_data *spidev = spi_get_drvdata(spi); + /* prevent new opens */ + mutex_lock(&device_list_lock); /* make sure ops on existing fds can abort cleanly */ spin_lock_irq(&spidev->spi_lock); spidev->spi = NULL; spin_unlock_irq(&spidev->spi_lock); - /* prevent new opens */ - mutex_lock(&device_list_lock); list_del(&spidev->device_entry); device_destroy(spidev_class, spidev->devt); clear_bit(MINOR(spidev->devt), minors); diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index 1ff30da74e4693ed83e08e1391e83e3950017804..1d5e4d4cf2d9a15013a34f77fa8558a37cc22a48 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -783,6 +783,8 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d, return 0; } +static struct lock_class_key qpnpint_irq_lock_class; + static int qpnpint_irq_domain_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq) @@ -791,6 +793,7 @@ static int qpnpint_irq_domain_map(struct irq_domain *d, dev_dbg(&pmic_arb->spmic->dev, "virq = %u, hwirq = %lu\n", virq, hwirq); + irq_set_lockdep_class(virq, &qpnpint_irq_lock_class); irq_set_chip_and_handler(virq, &pmic_arb_irqchip, handle_level_irq); irq_set_chip_data(virq, d->host_data); irq_set_noprobe(virq); diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index b187ff340094361466ccf452b075e3cd55079412..c682f1ea745cf28c1f451c883069330b3aa34a30 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -105,12 +105,12 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) { - void *addr = vm_map_ram(pages, num, -1, pgprot); + void *addr = vmap(pages, num, VM_MAP, pgprot); if (!addr) return -ENOMEM; memset(addr, 0, PAGE_SIZE * num); - vm_unmap_ram(addr, num); + vunmap(addr); return 0; } diff --git a/drivers/staging/android/ion/ion_secure_util.c b/drivers/staging/android/ion/ion_secure_util.c index 9d831e321e35d12aec3fe1a7d5ae0ff5fbb19d45..ae85b7c325078183597cd9393286c3f303d0bc41 100644 --- a/drivers/staging/android/ion/ion_secure_util.c +++ b/drivers/staging/android/ion/ion_secure_util.c @@ -155,6 +155,8 @@ int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list, int *dest_perms; int i; int ret = 0; + int j = -1; + int k = -1; if (dest_nelems <= 0) { pr_err("%s: dest_nelems invalid\n", @@ -173,10 +175,20 @@ int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list, if (dest_vm_list[i] == VMID_CP_SEC_DISPLAY || dest_vm_list[i] == VMID_CP_DSP_EXT) dest_perms[i] = PERM_READ; + else if (dest_vm_list[i] == VMID_CP_CAMERA_ENCODE) { + j = i; + dest_perms[i] = PERM_READ | PERM_WRITE; + } else if (dest_vm_list[i] == VMID_CP_CAMERA) { + k = i; + dest_perms[i] = PERM_READ | PERM_WRITE; + } else dest_perms[i] = PERM_READ | PERM_WRITE; } + if ((j != -1) && (k != -1)) + dest_perms[j] = PERM_READ; + ret = hyp_assign_table(sgt, &source_vmid, 1, dest_vm_list, dest_perms, dest_nelems); diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c index ccd1a91290bf9a27cc4471d5efdc6d1cebb6a22f..536a135cd00b39f1fe0bec2b696db38634e1c981 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1032.c +++ b/drivers/staging/comedi/drivers/addi_apci_1032.c @@ -115,14 +115,22 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, unsigned int *data) { struct apci1032_private *devpriv = dev->private; - unsigned int shift, oldmask; + unsigned int shift, oldmask, himask, lomask; switch (data[0]) { case INSN_CONFIG_DIGITAL_TRIG: if (data[1] != 0) return -EINVAL; shift = data[3]; - oldmask = (1U << shift) - 1; + if (shift < 32) { + oldmask = (1U << shift) - 1; + himask = data[4] << shift; + lomask = data[5] << shift; + } else { + oldmask = 0xffffffffu; + himask = 0; + lomask = 0; + } switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: devpriv->ctrl = 0; @@ -145,8 +153,8 @@ static int 
apci1032_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: if (devpriv->ctrl != (APCI1032_CTRL_INT_ENA | @@ -163,8 +171,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; default: return -EINVAL; diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c index 63991c49ff2309e8a00bdd1bf28e4ad16eefcce4..c4e36fb6df9d59196d1247d77ea3cbf779fd6197 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1500.c +++ b/drivers/staging/comedi/drivers/addi_apci_1500.c @@ -461,13 +461,14 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, struct apci1500_private *devpriv = dev->private; unsigned int trig = data[1]; unsigned int shift = data[3]; - unsigned int hi_mask = data[4] << shift; - unsigned int lo_mask = data[5] << shift; - unsigned int chan_mask = hi_mask | lo_mask; - unsigned int old_mask = (1 << shift) - 1; - unsigned int pm = devpriv->pm[trig] & old_mask; - unsigned int pt = devpriv->pt[trig] & old_mask; - unsigned int pp = devpriv->pp[trig] & old_mask; + unsigned int hi_mask; + unsigned int lo_mask; + unsigned int chan_mask; + unsigned int old_mask; + unsigned int pm; + unsigned int pt; + unsigned int pp; + unsigned int invalid_chan; if (trig > 1) { dev_dbg(dev->class_dev, @@ -475,11 +476,28 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, return -EINVAL; } - if (chan_mask > 0xffff) { + if (shift <= 16) { + hi_mask = data[4] << shift; + lo_mask = data[5] << shift; + old_mask = (1U << shift) - 1; + invalid_chan = (data[4] | data[5]) >> (16 - shift); + } else { + hi_mask = 0; + lo_mask = 0; + old_mask = 0xffff; + invalid_chan = data[4] | data[5]; + } + chan_mask = hi_mask | lo_mask; + + if (invalid_chan) { dev_dbg(dev->class_dev, "invalid digital trigger channel\n"); return -EINVAL; } + pm = devpriv->pm[trig] & old_mask; + pt = devpriv->pt[trig] & old_mask; + pp = devpriv->pp[trig] & old_mask; + switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: /* clear trigger configuration */ diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c index 9bfb79c2e5c8795e366d94c6650b634104e780ca..1b4ba19d599e8028eabe22d9a264df06db87c6c0 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1564.c +++ b/drivers/staging/comedi/drivers/addi_apci_1564.c @@ -340,14 +340,22 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, unsigned int *data) { struct apci1564_private *devpriv = dev->private; - unsigned int shift, oldmask; + unsigned int shift, oldmask, himask, lomask; switch (data[0]) { case INSN_CONFIG_DIGITAL_TRIG: if (data[1] != 0) return -EINVAL; shift = data[3]; - oldmask = (1U << shift) - 1; + if (shift < 32) { + oldmask = (1U << shift) - 1; + himask = data[4] << shift; + lomask = data[5] << shift; + } else { + oldmask = 0xffffffffu; + himask = 0; + lomask = 0; + } switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: devpriv->ctrl = 0; @@ -371,8 +379,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; 
- devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: if (devpriv->ctrl != (APCI1564_DI_IRQ_ENA | @@ -389,8 +397,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; default: return -EINVAL; diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c index 84c62e25609435493838b85139b45867e5c7aaca..6e411b634015a396c0177bd91c748e2ecf1bb7e8 100644 --- a/drivers/staging/comedi/drivers/ni_6527.c +++ b/drivers/staging/comedi/drivers/ni_6527.c @@ -341,7 +341,7 @@ static int ni6527_intr_insn_config(struct comedi_device *dev, case COMEDI_DIGITAL_TRIG_ENABLE_EDGES: /* check shift amount */ shift = data[3]; - if (shift >= s->n_chan) { + if (shift >= 32) { mask = 0; rising = 0; falling = 0; diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c index 4e7575147775dadc34dc3c48aa639e63c92ecf67..9fab0e2751aa6e4df2ba18a0a6e0db579c9d6ecc 100644 --- a/drivers/staging/greybus/light.c +++ b/drivers/staging/greybus/light.c @@ -1033,7 +1033,8 @@ static int gb_lights_light_config(struct gb_lights *glights, u8 id) light->channels_count = conf.channel_count; light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL); - + if (!light->name) + return -ENOMEM; light->channels = kcalloc(light->channels_count, sizeof(struct gb_channel), GFP_KERNEL); if (!light->channels) diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c index 101ca5097fc9f5f05b158f3f36d31237306eb078..93e2c091c5655a2d0d8010c534745e22a283601f 100644 --- a/drivers/staging/greybus/sdio.c +++ b/drivers/staging/greybus/sdio.c @@ -412,6 +412,7 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd) struct gb_sdio_command_request request = {0}; struct gb_sdio_command_response response; struct mmc_data *data = host->mrq->data; + unsigned int timeout_ms; u8 cmd_flags; u8 cmd_type; int i; @@ -470,9 +471,12 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd) request.data_blksz = cpu_to_le16(data->blksz); } - ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND, - &request, sizeof(request), &response, - sizeof(response)); + timeout_ms = cmd->busy_timeout ? 
cmd->busy_timeout : + GB_OPERATION_TIMEOUT_DEFAULT; + + ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND, + &request, sizeof(request), &response, + sizeof(response), timeout_ms); if (ret < 0) goto out; diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c index c6d01b800d3ccee281b80bb926a037b86f650927..2b297df88bdd3c6ac679cb36756e59728dbbf3a7 100644 --- a/drivers/staging/greybus/uart.c +++ b/drivers/staging/greybus/uart.c @@ -538,9 +538,9 @@ static void gb_tty_set_termios(struct tty_struct *tty, } if (C_CRTSCTS(tty) && C_BAUD(tty) != B0) - newline.flow_control |= GB_SERIAL_AUTO_RTSCTS_EN; + newline.flow_control = GB_SERIAL_AUTO_RTSCTS_EN; else - newline.flow_control &= ~GB_SERIAL_AUTO_RTSCTS_EN; + newline.flow_control = 0; if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) { memcpy(&gb_tty->line_coding, &newline, sizeof(newline)); diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c index 3e00df74b18c883d4e19f0bac7ec087988b58821..989d5eca8861863912a3b0d51aa12e1af2d11d15 100644 --- a/drivers/staging/iio/resolver/ad2s1210.c +++ b/drivers/staging/iio/resolver/ad2s1210.c @@ -126,17 +126,24 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data) static int ad2s1210_config_read(struct ad2s1210_state *st, unsigned char address) { - struct spi_transfer xfer = { - .len = 2, - .rx_buf = st->rx, - .tx_buf = st->tx, + struct spi_transfer xfers[] = { + { + .len = 1, + .rx_buf = &st->rx[0], + .tx_buf = &st->tx[0], + .cs_change = 1, + }, { + .len = 1, + .rx_buf = &st->rx[1], + .tx_buf = &st->tx[1], + }, }; int ret = 0; ad2s1210_set_mode(MOD_CONFIG, st); st->tx[0] = address | AD2S1210_MSB_IS_HIGH; st->tx[1] = AD2S1210_REG_FAULT; - ret = spi_sync_transfer(st->sdev, &xfer, 1); + ret = spi_sync_transfer(st->sdev, xfers, 2); if (ret < 0) return ret; st->old_data = true; diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h index 0ed2f44ab4e958e0cb095081ccd13932cd09b284..14b899fcd97c7c11d3aaa679207b4f8f481ecaaf 100644 --- a/drivers/staging/rtl8712/wifi.h +++ b/drivers/staging/rtl8712/wifi.h @@ -468,7 +468,7 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe) /* block-ack parameters */ #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C -#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 +#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0 #define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 @@ -562,13 +562,6 @@ struct ieee80211_ht_addt_info { #define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 #define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 -/* block-ack parameters */ -#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 -#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C -#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 -#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 -#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 - /* * A-PMDU buffer sizes * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c index f485f541e36d0ca4d6a364993ea69751733b76bc..d6de62ee681e5e4170b2fae906210081b3723866 100644 --- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c +++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c @@ -1904,12 +1904,14 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, 
_SUPPORTEDRATES_IE_, &ie_len, var_ie_len); if (pIE == NULL) return _FAIL; + if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates)) + return _FAIL; memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len); supportRateNum = ie_len; pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len); - if (pIE) + if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum)) memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len); return _SUCCESS; diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c index 67207b0554cd4c868a095d8b8ab7cfeb51cb9233..5d6f3686c0deb01b53f7a023fc23508577a22629 100644 --- a/drivers/staging/sm750fb/sm750.c +++ b/drivers/staging/sm750fb/sm750.c @@ -899,6 +899,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index) fix->visual = FB_VISUAL_PSEUDOCOLOR; break; case 16: + case 24: case 32: fix->visual = FB_VISUAL_TRUECOLOR; break; diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c index d8d86761b790ae07744525e279c09084a2e85b3a..8d32b1603d10abd1630aad02b38588dcff82efd0 100644 --- a/drivers/staging/wlan-ng/prism2usb.c +++ b/drivers/staging/wlan-ng/prism2usb.c @@ -61,11 +61,25 @@ static int prism2sta_probe_usb(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *dev; - + const struct usb_endpoint_descriptor *epd; + const struct usb_host_interface *iface_desc = interface->cur_altsetting; struct wlandevice *wlandev = NULL; struct hfa384x *hw = NULL; int result = 0; + if (iface_desc->desc.bNumEndpoints != 2) { + result = -ENODEV; + goto failed; + } + + result = -EINVAL; + epd = &iface_desc->endpoint[1].desc; + if (!usb_endpoint_is_bulk_in(epd)) + goto failed; + epd = &iface_desc->endpoint[2].desc; + if (!usb_endpoint_is_bulk_out(epd)) + goto failed; + dev = interface_to_usbdev(interface); wlandev = create_wlan(); if (!wlandev) { diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 40c7ae9822030743e147b957c0001b27b2f82f90..786a9e2c4e4da88e51510afc0dcca07404bcdcbf 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -432,11 +432,11 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev, int i; struct freq_table *freq_table = cpufreq_cdev->freq_table; - for (i = 1; i <= cpufreq_cdev->max_level; i++) - if (power > freq_table[i].power) + for (i = 0; i < cpufreq_cdev->max_level; i++) + if (power >= freq_table[i].power) break; - return freq_table[i - 1].frequency; + return freq_table[i].frequency; } /** diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c index 76b92083744c9cb9582565f9ac7ecb59016e88f5..1e61c09153c9abf0d02eb43d4ff21ba925da9415 100644 --- a/drivers/thermal/mtk_thermal.c +++ b/drivers/thermal/mtk_thermal.c @@ -407,8 +407,7 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank) u32 raw; for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) { - raw = readl(mt->thermal_base + - conf->msr[conf->bank_data[bank->id].sensors[i]]); + raw = readl(mt->thermal_base + conf->msr[i]); temp = raw_to_mcelsius(mt, conf->bank_data[bank->id].sensors[i], @@ -545,8 +544,7 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num, for (i = 0; i < conf->bank_data[num].num_sensors; i++) writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]], - mt->thermal_base + - conf->adcpnp[conf->bank_data[num].sensors[i]]); + 
mt->thermal_base + conf->adcpnp[i]); writel((1 << conf->bank_data[num].num_sensors) - 1, mt->thermal_base + TEMP_MONCTL0); diff --git a/drivers/thermal/qcom/qmi_sensors.c b/drivers/thermal/qcom/qmi_sensors.c index 7bca52588f51e14c7ffa5a9b49189c9036a62815..b4a036e8a45438971541c78029109637ad30b751 100644 --- a/drivers/thermal/qcom/qmi_sensors.c +++ b/drivers/thermal/qcom/qmi_sensors.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -64,6 +64,11 @@ enum qmi_ts_sensor { QMI_SYS_THERM1, QMI_SYS_THERM2, QMI_TS_TSENS_1, + QMI_TS_RET_PA_0_FR1, + QMI_TS_WTR_PA_0_FR1, + QMI_TS_WTR_PA_1_FR1, + QMI_TS_WTR_PA_2_FR1, + QMI_TS_WTR_PA_3_FR1, QMI_TS_MAX_NR }; @@ -121,6 +126,11 @@ static char sensor_clients[QMI_TS_MAX_NR][QMI_CLIENT_NAME_LENGTH] = { {"sys_therm1"}, {"sys_therm2"}, {"modem_tsens1"}, + {"qfe_ret_pa0_fr1"}, + {"qfe_wtr_pa0_fr1"}, + {"qfe_wtr_pa1_fr1"}, + {"qfe_wtr_pa2_fr1"}, + {"qfe_wtr_pa3_fr1"}, }; static int32_t encode_qmi(int32_t val) diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index c211a8e4a21051f57c8a622ffdcf06c10af2f15d..fa98c398d70f36b695dbe97221c373d930437b65 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -183,7 +183,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, data = ti_bandgap_get_sensor_data(bgp, id); - if (!data || IS_ERR(data)) + if (IS_ERR_OR_NULL(data)) data = ti_thermal_build_data(bgp, id); if (!data) @@ -210,7 +210,7 @@ int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id) data = ti_bandgap_get_sensor_data(bgp, id); - if (data && data->ti_thermal) { + if (!IS_ERR_OR_NULL(data) && data->ti_thermal) { if (data->our_zone) thermal_zone_device_unregister(data->ti_thermal); } @@ -276,7 +276,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id) data = ti_bandgap_get_sensor_data(bgp, id); - if (data) { + if (!IS_ERR_OR_NULL(data)) { cpufreq_cooling_unregister(data->cool_dev); cpufreq_cpu_put(data->policy); } diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index fc0ef13f2616a241c285f2fbb0a142e309f8d39e..d52221ae1b85af4fa1941606186bf64d9280f8a9 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c @@ -357,15 +357,14 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) * tty fields and return the kref reference. */ if (rc) { - tty_port_tty_set(&hp->port, NULL); - tty->driver_data = NULL; - tty_port_put(&hp->port); printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc); - } else + } else { /* We are ready... raise DTR/RTS */ if (C_BAUD(tty)) if (hp->ops->dtr_rts) hp->ops->dtr_rts(hp, 1); + tty_port_set_initialized(&hp->port, true); + } /* Force wakeup of the polling thread */ hvc_kick(); @@ -375,22 +374,12 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) static void hvc_close(struct tty_struct *tty, struct file * filp) { - struct hvc_struct *hp; + struct hvc_struct *hp = tty->driver_data; unsigned long flags; if (tty_hung_up_p(filp)) return; - /* - * No driver_data means that this close was issued after a failed - * hvc_open by the tty layer's release_dev() function and we can just - * exit cleanly because the kref reference wasn't made.
- */ - if (!tty->driver_data) - return; - - hp = tty->driver_data; - spin_lock_irqsave(&hp->port.lock, flags); if (--hp->port.count == 0) { @@ -398,6 +387,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) /* We are done with the tty pointer now. */ tty_port_tty_set(&hp->port, NULL); + if (!tty_port_initialized(&hp->port)) + return; + if (C_HUPCL(tty)) if (hp->ops->dtr_rts) hp->ops->dtr_rts(hp, 0); @@ -414,6 +406,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) * waking periodically to check chars_in_buffer(). */ tty_wait_until_sent(tty, HVC_CLOSE_WAIT); + tty_port_set_initialized(&hp->port, false); } else { if (hp->port.count < 0) printk(KERN_ERR "hvc_close %X: oops, count is %d\n", diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index f46bd1af7a10b290bddeeda3aba53fd8a0acd665..c70e79a0e9f28abca88141e15a4ace54cdbb57f7 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -677,11 +677,10 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len, * FIXME: lock against link layer control transmissions */ -static void gsm_data_kick(struct gsm_mux *gsm) +static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci) { struct gsm_msg *msg, *nmsg; int len; - int skip_sof = 0; list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) { if (gsm->constipated && msg->addr) @@ -703,18 +702,23 @@ static void gsm_data_kick(struct gsm_mux *gsm) print_hex_dump_bytes("gsm_data_kick: ", DUMP_PREFIX_OFFSET, gsm->txframe, len); - - if (gsm->output(gsm, gsm->txframe + skip_sof, - len - skip_sof) < 0) + if (gsm->output(gsm, gsm->txframe, len) < 0) break; /* FIXME: Can eliminate one SOF in many more cases */ gsm->tx_bytes -= msg->len; - /* For a burst of frames skip the extra SOF within the - burst */ - skip_sof = 1; list_del(&msg->list); kfree(msg); + + if (dlci) { + tty_port_tty_wakeup(&dlci->port); + } else { + int i = 0; + + for (i = 0; i < NUM_DLCI; i++) + if (gsm->dlci[i]) + tty_port_tty_wakeup(&gsm->dlci[i]->port); + } } } @@ -766,7 +770,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) /* Add to the actual output queue */ list_add_tail(&msg->list, &gsm->tx_list); gsm->tx_bytes += msg->len; - gsm_data_kick(gsm); + gsm_data_kick(gsm, dlci); } /** @@ -1227,7 +1231,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, gsm_control_reply(gsm, CMD_FCON, NULL, 0); /* Kick the link in case it is idling */ spin_lock_irqsave(&gsm->tx_lock, flags); - gsm_data_kick(gsm); + gsm_data_kick(gsm, NULL); spin_unlock_irqrestore(&gsm->tx_lock, flags); break; case CMD_FCOFF: @@ -2426,7 +2430,7 @@ static void gsmld_write_wakeup(struct tty_struct *tty) /* Queue poll */ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); spin_lock_irqsave(&gsm->tx_lock, flags); - gsm_data_kick(gsm); + gsm_data_kick(gsm, NULL); if (gsm->tx_bytes < TX_THRESH_LO) { gsm_dlci_data_sweep(gsm); } diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 5017a0f46b826574e90c30fb94535b8de6b18742..d6b790510c94eec7f5d4f362e9ba6a003e382a30 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -534,6 +534,7 @@ static void __init serial8250_isa_init_ports(void) */ up->mcr_mask = ~ALPHA_KLUDGE_MCR; up->mcr_force = ALPHA_KLUDGE_MCR; + serial8250_set_defaults(up); } /* chain base port ops to support Remote Supervisor Adapter */ @@ -557,7 +558,6 @@ static void __init serial8250_isa_init_ports(void) port->membase = old_serial_port[i].iomem_base; port->iotype = 
old_serial_port[i].io_type; port->regshift = old_serial_port[i].iomem_reg_shift; - serial8250_set_defaults(up); port->irqflags |= irqflag; if (serial8250_isa_config != NULL) diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index 899f36b59af798c085deb70daedf7e37ae1678cb..ed81128bb42e00989398c4fae5a7d7a84c74bca0 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -229,7 +229,17 @@ static void setup_gpio(struct pci_dev *pcidev, u8 __iomem *p) * devices will export them as GPIOs, so we pre-configure them safely * as inputs. */ - u8 dir = pcidev->vendor == PCI_VENDOR_ID_EXAR ? 0xff : 0x00; + + u8 dir = 0x00; + + if ((pcidev->vendor == PCI_VENDOR_ID_EXAR) && + (pcidev->subsystem_vendor != PCI_VENDOR_ID_SEALEVEL)) { + // Configure GPIO as inputs for Commtech adapters + dir = 0xff; + } else { + // Configure GPIO as outputs for SeaLevel adapters + dir = 0x00; + } writeb(0x00, p + UART_EXAR_MPIOINT_7_0); writeb(0x00, p + UART_EXAR_MPIOLVL_7_0); diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index fa909fa3c4cdbb218ad88728bc74c7b267f9211e..b1363114d538c46b2ad4d7cc3852401a158eccc4 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -45,8 +45,21 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, unsigned long flags; unsigned int baud, quot; + /* + * Store the requested baud rate before calling the generic 8250 + * set_termios method. Standard 8250 port expects bauds to be + * no higher than (uartclk / 16) so the baud will be clamped if it + * gets out of that bound. Mediatek 8250 port supports speed + * higher than that, therefore we'll get original baud rate back + * after calling the generic set_termios method and recalculate + * the speed later in this method. + */ + baud = tty_termios_baud_rate(termios); + serial8250_do_set_termios(port, termios, old); + tty_termios_encode_baud_rate(termios, baud, baud); + /* * Mediatek UARTs use an extra highspeed register (UART_MTK_HIGHS) * @@ -85,6 +98,11 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, */ spin_lock_irqsave(&port->lock, flags); + /* + * Update the per-port timeout. 
+ */ + uart_update_timeout(port, termios->c_cflag, baud); + /* set DLAB we have cval saved in up->lcr from the call to the core */ serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB); serial_dl_write(up, quot); diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 637f72fb6427f520a2bedadd0fc74e62941c271d..e55b556337214177eb6def746ac4b63ef162545f 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2605,6 +2605,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, uap->port.fifosize = uap->fifosize; uap->port.flags = UPF_BOOT_AUTOCONF; uap->port.line = index; + spin_lock_init(&uap->port.lock); amba_ports[index] = uap; diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 630065b551f5f1ea57ddc687c62147fec54d9894..dfa2db6ed3223e0a1d8ede869e2c3545f087efcf 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -538,6 +538,11 @@ static void dma_tx_callback(void *data) if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port)) imx_dma_tx(sport); + else if (sport->port.rs485.flags & SER_RS485_ENABLED) { + temp = readl(sport->port.membase + UCR4); + temp |= UCR4_TCEN; + writel(temp, sport->port.membase + UCR4); + } spin_unlock_irqrestore(&sport->port.lock, flags); } @@ -555,6 +560,10 @@ static void imx_dma_tx(struct imx_port *sport) if (sport->dma_is_txing) return; + temp = readl(sport->port.membase + UCR4); + temp &= ~UCR4_TCEN; + writel(temp, sport->port.membase + UCR4); + sport->tx_bytes = uart_circ_chars_pending(xmit); if (xmit->tail < xmit->head || xmit->head == 0) { @@ -617,10 +626,15 @@ static void imx_start_tx(struct uart_port *port) if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) imx_stop_rx(port); - /* enable transmitter and shifter empty irq */ - temp = readl(port->membase + UCR4); - temp |= UCR4_TCEN; - writel(temp, port->membase + UCR4); + /* + * Enable transmitter and shifter empty irq only if DMA is off. + * In the DMA case this is done in the tx-callback. + */ + if (!sport->dma_is_enabled) { + temp = readl(port->membase + UCR4); + temp |= UCR4_TCEN; + writel(temp, port->membase + UCR4); + } } if (!sport->dma_is_enabled) { diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index c1655aba131f6b08723d9bcff88c588413b18cbc..590acca601348cb2afeab91bb2175881d1ef357e 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -1524,10 +1524,12 @@ static int __init sc16is7xx_init(void) #endif return ret; +#ifdef CONFIG_SERIAL_SC16IS7XX_SPI err_spi: #ifdef CONFIG_SERIAL_SC16IS7XX_I2C i2c_del_driver(&sc16is7xx_i2c_uart_driver); #endif +#endif err_i2c: uart_unregister_driver(&sc16is7xx_uart); return ret; diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index b9ec4e2828e2182ffdf034f8b5500567b949447e..610cb8338d5350951e605f347dce67066d2fe19c 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -126,7 +126,11 @@ static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */ static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */ static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. 
*/ static bool dead_key_next; -static int npadch = -1; /* -1 or number assembled on pad */ + +/* Handles a number being assembled on the number pad */ +static bool npadch_active; +static unsigned int npadch_value; + static unsigned int diacr; static char rep; /* flag telling character repeat */ @@ -816,12 +820,12 @@ static void k_shift(struct vc_data *vc, unsigned char value, char up_flag) shift_state &= ~(1 << value); /* kludge */ - if (up_flag && shift_state != old_state && npadch != -1) { + if (up_flag && shift_state != old_state && npadch_active) { if (kbd->kbdmode == VC_UNICODE) - to_utf8(vc, npadch); + to_utf8(vc, npadch_value); else - put_queue(vc, npadch & 0xff); - npadch = -1; + put_queue(vc, npadch_value & 0xff); + npadch_active = false; } } @@ -839,7 +843,7 @@ static void k_meta(struct vc_data *vc, unsigned char value, char up_flag) static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag) { - int base; + unsigned int base; if (up_flag) return; @@ -853,10 +857,12 @@ static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag) base = 16; } - if (npadch == -1) - npadch = value; - else - npadch = npadch * base + value; + if (!npadch_active) { + npadch_value = 0; + npadch_active = true; + } + + npadch_value = npadch_value * base + value; } static void k_lock(struct vc_data *vc, unsigned char value, char up_flag) diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 8a4e7879a7a6e73015264538c0bb1ac4066be99a..92267396ff40733c62a4ba38f31fa19aa2d56ed0 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -762,10 +762,19 @@ static const struct tty_port_operations vc_port_ops = { .destruct = vc_port_destruct, }; +/* + * Change # of rows and columns (0 means unchanged/the size of fg_console) + * [this is to be used together with some user program + * like resize that changes the hardware videomode] + */ +#define VC_MAXCOL (32767) +#define VC_MAXROW (32767) + int vc_allocate(unsigned int currcons) /* return 0 on success */ { struct vt_notifier_param param; struct vc_data *vc; + int err; WARN_CONSOLE_UNLOCKED(); @@ -795,6 +804,11 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ if (!*vc->vc_uni_pagedir_loc) con_set_default_unimap(vc); + err = -EINVAL; + if (vc->vc_cols > VC_MAXCOL || vc->vc_rows > VC_MAXROW || + vc->vc_screenbuf_size > KMALLOC_MAX_SIZE || !vc->vc_screenbuf_size) + goto err_free; + err = -ENOMEM; vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) goto err_free; @@ -812,7 +826,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ err_free: kfree(vc); vc_cons[currcons].d = NULL; - return -ENOMEM; + return err; } static inline int resize_screen(struct vc_data *vc, int width, int height, @@ -827,14 +841,6 @@ static inline int resize_screen(struct vc_data *vc, int width, int height, return err; } -/* - * Change # of rows and columns (0 means unchanged/the size of fg_console) - * [this is to be used together with some user program - * like resize that changes the hardware videomode] - */ -#define VC_RESIZE_MAXCOL (32767) -#define VC_RESIZE_MAXROW (32767) - /** * vc_do_resize - resizing method for the tty * @tty: tty being resized @@ -869,7 +875,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, user = vc->vc_resize_user; vc->vc_resize_user = 0; - if (cols > VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW) + if (cols > VC_MAXCOL || lines > VC_MAXROW) return -EINVAL; new_cols = (cols ? 
cols : vc->vc_cols); @@ -880,7 +886,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) return 0; - if (new_screen_size > KMALLOC_MAX_SIZE) + if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size) return -EINVAL; newscreen = kzalloc(new_screen_size, GFP_USER); if (!newscreen) @@ -3031,6 +3037,7 @@ static int __init con_init(void) INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); tty_port_init(&vc->port); visual_init(vc, currcons, 1); + /* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. */ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT); vc_init(vc, vc->vc_rows, vc->vc_cols, currcons || !vc->vc_sw->con_save_screen); diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c index f598ecddc8a703a9e8cec96228d3bf526c92b517..b58a504240c4e0d776739e19f7450522d26f566b 100644 --- a/drivers/uio/uio_pdrv_genirq.c +++ b/drivers/uio/uio_pdrv_genirq.c @@ -148,7 +148,7 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev) if (!uioinfo->irq) { ret = platform_get_irq(pdev, 0); uioinfo->irq = ret; - if (ret == -ENXIO && pdev->dev.of_node) + if (ret == -ENXIO) uioinfo->irq = UIO_IRQ_NONE; else if (ret < 0) { dev_err(&pdev->dev, "failed to get IRQ\n"); diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c index 7311ed61e99a4b3b510c4bb68329f3717a775a61..029c8bc54b7aab8470e8bcd5186f706ede52cd78 100644 --- a/drivers/usb/c67x00/c67x00-sched.c +++ b/drivers/usb/c67x00/c67x00-sched.c @@ -500,7 +500,7 @@ c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status) c67x00_release_urb(c67x00, urb); usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb); spin_unlock(&c67x00->lock); - usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, urbp->status); + usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status); spin_lock(&c67x00->lock); } diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 70306ae039c0dce1c24e0d501b00428e86fa635b..77410fb42eab03353c3f909a1266a40fbb2f4052 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -1159,6 +1159,29 @@ static void ci_controller_suspend(struct ci_hdrc *ci) enable_irq(ci->irq); } +/* + * Handle the wakeup interrupt triggered by extcon connector + * We need to call ci_irq again for extcon since the first + * interrupt (wakeup int) only let the controller be out of + * low power mode, but not handle any interrupts. 
+ */ +static void ci_extcon_wakeup_int(struct ci_hdrc *ci) +{ + struct ci_hdrc_cable *cable_id, *cable_vbus; + u32 otgsc = hw_read_otgsc(ci, ~0); + + cable_id = &ci->platdata->id_extcon; + cable_vbus = &ci->platdata->vbus_extcon; + + if (!IS_ERR(cable_id->edev) && ci->is_otg && + (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) + ci_irq(ci->irq, ci); + + if (!IS_ERR(cable_vbus->edev) && ci->is_otg && + (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) + ci_irq(ci->irq, ci); +} + static int ci_controller_resume(struct device *dev) { struct ci_hdrc *ci = dev_get_drvdata(dev); @@ -1191,6 +1214,7 @@ static int ci_controller_resume(struct device *dev) enable_irq(ci->irq); if (ci_otg_is_fsm_mode(ci)) ci_otg_fsm_wakeup_by_srp(ci); + ci_extcon_wakeup_int(ci); } return 0; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 38709bee4c202f5216a2fd22b248a34e75f96a19..0de467c8593db2a12c6b784968626945cdfa5969 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -602,7 +602,7 @@ static void acm_softint(struct work_struct *work) } if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) { - for (i = 0; i < ACM_NR; i++) + for (i = 0; i < acm->rx_buflimit; i++) if (test_and_clear_bit(i, &acm->urbs_in_error_delay)) acm_submit_read_urb(acm, i, GFP_NOIO); } @@ -1734,6 +1734,8 @@ static int acm_pre_reset(struct usb_interface *intf) static const struct usb_device_id acm_ids[] = { /* quirky and broken devices */ + { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. (formerly SMSC) */ + .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */ { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */ .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 5e456a83779d563e40d3d0d4c862db62826bb246..b0471ce34011ab3f6b7204d370828e509022d94a 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -481,7 +481,8 @@ static int usblp_release(struct inode *inode, struct file *file) usb_autopm_put_interface(usblp->intf); if (!usblp->present) /* finish cleanup from disconnect */ - usblp_cleanup(usblp); + usblp_cleanup(usblp); /* any URBs must be dead */ + mutex_unlock(&usblp_mutex); return 0; } @@ -1388,9 +1389,11 @@ static void usblp_disconnect(struct usb_interface *intf) usblp_unlink_urbs(usblp); mutex_unlock(&usblp->mut); + usb_poison_anchored_urbs(&usblp->urbs); if (!usblp->used) usblp_cleanup(usblp); + mutex_unlock(&usblp_mutex); } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 8e03def58017e12e9f2136b51d6d497f4d8ad386..4c6ac03fab5b0874bdd19456b3d2828ce1cb0c19 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -38,6 +38,7 @@ #define USB_VENDOR_GENESYS_LOGIC 0x05e3 #define USB_VENDOR_SMSC 0x0424 +#define USB_PRODUCT_USB5534B 0x5534 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01 #define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02 @@ -5349,8 +5350,11 @@ static void hub_event(struct work_struct *work) } static const struct usb_device_id hub_id_table[] = { - { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS, + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_PRODUCT + | USB_DEVICE_ID_MATCH_INT_CLASS, .idVendor = USB_VENDOR_SMSC, + .idProduct = USB_PRODUCT_USB5534B, .bInterfaceClass = USB_CLASS_HUB, .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND}, { .match_flags = USB_DEVICE_ID_MATCH_VENDOR diff --git a/drivers/usb/core/message.c 
b/drivers/usb/core/message.c index 00e80cfe614cefc4945c7947f4e37627891f29da..298c91f83aeec022f04c1061358099a0034ee421 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1082,11 +1082,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr, if (usb_endpoint_out(epaddr)) { ep = dev->ep_out[epnum]; - if (reset_hardware) + if (reset_hardware && epnum != 0) dev->ep_out[epnum] = NULL; } else { ep = dev->ep_in[epnum]; - if (reset_hardware) + if (reset_hardware && epnum != 0) dev->ep_in[epnum] = NULL; } if (ep) { diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 7d3130b0209ee2ed0d1161657a2f233f12b2b1de..f3bbcbd708ae800c69eb5d214234cab6ad248cad 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -73,11 +73,12 @@ static const struct usb_device_id usb_quirk_list[] = { /* Logitech HD Webcam C270 */ { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, - /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ + /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT }, /* Logitech ConferenceCam CC3000e */ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index b8bcb007c92a935a0618a451e3b1f1a9e4a6832c..e3e0a3ab31daa7a3a00e6cfb40cd62fabbf92cab 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c @@ -364,10 +364,13 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) if (ret && (ret != -ENOTSUPP)) dev_err(hsotg->dev, "exit hibernation failed\n"); + /* Change to L0 state */ + hsotg->lx_state = DWC2_L0; call_gadget(hsotg, resume); + } else { + /* Change to L0 state */ + hsotg->lx_state = DWC2_L0; } - /* Change to L0 state */ - hsotg->lx_state = DWC2_L0; } else { if (hsotg->params.hibernation) return; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index aeb6f7c84ea0ae83a83c5863e37ecf6eb97fbb78..03bc479d04e0d63cb90c8009c3dd52e952c69bb7 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -4723,12 +4723,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) epnum, 0); } - ret = usb_add_gadget_udc(dev, &hsotg->gadget); - if (ret) { - dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, - hsotg->ctrl_req); - return ret; - } dwc2_hsotg_dump(hsotg); return 0; diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index daf0d37acb37f2ccd2714e6be6bc14832b1cb937..442cb93935dd4bffbcde8f01983dca54fa33c6cf 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -338,7 +338,8 @@ static void dwc2_driver_shutdown(struct platform_device *dev) { struct dwc2_hsotg *hsotg = platform_get_drvdata(dev); - disable_irq(hsotg->irq); + dwc2_disable_global_interrupts(hsotg); + synchronize_irq(hsotg->irq); } /** @@ -452,6 +453,17 @@ static int dwc2_driver_probe(struct platform_device *dev) if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) dwc2_lowlevel_hw_disable(hsotg); +#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ + IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) + /* Postponed adding a new gadget to the udc class driver list */ + if (hsotg->gadget_enabled) { + retval = 
usb_add_gadget_udc(hsotg->dev, &hsotg->gadget); + if (retval) { + dwc2_hsotg_remove(hsotg); + goto error; + } + } +#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */ return 0; error: diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index ac074ef131dc47c535de0a8b5b1e5b1744938588..872ac890189d89e0a5b7049f60b6779944cbddbc 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1634,6 +1634,9 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, int ret; + if (dwc3_gadget_is_suspended(dwc)) + return -EAGAIN; + spin_lock_irqsave(&dwc->lock, flags); ret = __dwc3_gadget_ep_queue(dep, req); spin_unlock_irqrestore(&dwc->lock, flags); diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 9522b21db5fcdcbb5b2f285225085b1660197e7e..e872a4155df1d73ca6a4fb8f7725bd659a774d20 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -101,40 +101,43 @@ function_descriptors(struct usb_function *f, } /** - * next_ep_desc() - advance to the next EP descriptor + * next_desc() - advance to the next desc_type descriptor * @t: currect pointer within descriptor array + * @desc_type: descriptor type * - * Return: next EP descriptor or NULL + * Return: next desc_type descriptor or NULL * - * Iterate over @t until either EP descriptor found or + * Iterate over @t until either desc_type descriptor found or * NULL (that indicates end of list) encountered */ static struct usb_descriptor_header** -next_ep_desc(struct usb_descriptor_header **t) +next_desc(struct usb_descriptor_header **t, u8 desc_type) { for (; *t; t++) { - if ((*t)->bDescriptorType == USB_DT_ENDPOINT) + if ((*t)->bDescriptorType == desc_type) return t; } return NULL; } /* - * for_each_ep_desc()- iterate over endpoint descriptors in the - * descriptors list - * @start: pointer within descriptor array. - * @ep_desc: endpoint descriptor to use as the loop cursor + * for_each_desc() - iterate over desc_type descriptors in the + * descriptors list + * @start: pointer within descriptor array. + * @iter_desc: desc_type descriptor to use as the loop cursor + * @desc_type: wanted descriptr type */ -#define for_each_ep_desc(start, ep_desc) \ - for (ep_desc = next_ep_desc(start); \ - ep_desc; ep_desc = next_ep_desc(ep_desc+1)) +#define for_each_desc(start, iter_desc, desc_type) \ + for (iter_desc = next_desc(start, desc_type); \ + iter_desc; iter_desc = next_desc(iter_desc + 1, desc_type)) /** - * config_ep_by_speed() - configures the given endpoint + * config_ep_by_speed_and_alt() - configures the given endpoint * according to gadget speed. 
* @g: pointer to the gadget * @f: usb function * @_ep: the endpoint to configure + * @alt: alternate setting number * * Return: error code, 0 on success * @@ -147,12 +150,14 @@ next_ep_desc(struct usb_descriptor_header **t) * Note: the supplied function should hold all the descriptors * for supported speeds */ -int config_ep_by_speed(struct usb_gadget *g, - struct usb_function *f, - struct usb_ep *_ep) +int config_ep_by_speed_and_alt(struct usb_gadget *g, + struct usb_function *f, + struct usb_ep *_ep, + u8 alt) { struct usb_composite_dev *cdev; struct usb_endpoint_descriptor *chosen_desc = NULL; + struct usb_interface_descriptor *int_desc = NULL; struct usb_descriptor_header **speed_desc = NULL; struct usb_ss_ep_comp_descriptor *comp_desc = NULL; @@ -197,8 +202,20 @@ int config_ep_by_speed(struct usb_gadget *g, return -EIO; } + /* find correct alternate setting descriptor */ + for_each_desc(speed_desc, d_spd, USB_DT_INTERFACE) { + int_desc = (struct usb_interface_descriptor *)*d_spd; + + if (int_desc->bAlternateSetting == alt) { + speed_desc = d_spd; + goto intf_found; + } + } + return -EIO; + +intf_found: /* find descriptors */ - for_each_ep_desc(speed_desc, d_spd) { + for_each_desc(speed_desc, d_spd, USB_DT_ENDPOINT) { chosen_desc = (struct usb_endpoint_descriptor *)*d_spd; if (chosen_desc->bEndpointAddress == _ep->address) goto ep_found; @@ -248,6 +265,32 @@ int config_ep_by_speed(struct usb_gadget *g, } return 0; } +EXPORT_SYMBOL_GPL(config_ep_by_speed_and_alt); + +/** + * config_ep_by_speed() - configures the given endpoint + * according to gadget speed. + * @g: pointer to the gadget + * @f: usb function + * @_ep: the endpoint to configure + * + * Return: error code, 0 on success + * + * This function chooses the right descriptors for a given + * endpoint according to gadget speed and saves it in the + * endpoint desc field. If the endpoint already has a descriptor + * assigned to it - overwrites it with currently corresponding + * descriptor. The endpoint maxpacket field is updated according + * to the chosen descriptor. + * Note: the supplied function should hold all the descriptors + * for supported speeds + */ +int config_ep_by_speed(struct usb_gadget *g, + struct usb_function *f, + struct usb_ep *_ep) +{ + return config_ep_by_speed_and_alt(g, f, _ep, 0); +} EXPORT_SYMBOL_GPL(config_ep_by_speed); /** @@ -922,7 +965,7 @@ static int set_config(struct usb_composite_dev *cdev, if (!c) goto done; - place_marker("M - USB Device is enumerated"); + update_marker("M - USB Device is enumerated"); usb_gadget_set_state(gadget, USB_STATE_CONFIGURED); cdev->config = c; diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 3916b7efefd5d4f4bbfb15d2921520e41fe2691f..e56737e1d6587761276dad947c7a85291cae9777 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -307,6 +307,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item, char *name; int ret; + if (strlen(page) < len) + return -EOVERFLOW; + name = kstrdup(page, GFP_KERNEL); if (!name) return -ENOMEM; diff --git a/drivers/usb/gadget/function/f_ipc.c b/drivers/usb/gadget/function/f_ipc.c index d0c1613a1d27af4372201a5962617fb67eb55499..f9c20c8b0dacc3b8e136b224cc49a97380fbb930 100644 --- a/drivers/usb/gadget/function/f_ipc.c +++ b/drivers/usb/gadget/function/f_ipc.c @@ -276,7 +276,7 @@ static int ipc_write(struct platform_device *pdev, char *buf, /* Notify GPIO driver to wakup the host if host * is in suspend mode. 
*/ - sb_notifier_call_chain(EVT_WAKE_UP, NULL); + sb_notifier_call_chain(EVENT_REQUEST_WAKE_UP, NULL); wait_event_interruptible(ipc_dev->state_wq, ipc_dev->online || ipc_dev->current_state == IPC_DISCONNECTED); pr_debug("%s: Interface ready, Retry IN request\n", __func__); @@ -298,7 +298,7 @@ static int ipc_write(struct platform_device *pdev, char *buf, * completion structure. */ } else if (ipc_dev->connected && !ipc_dev->online) { - sb_notifier_call_chain(EVT_WAKE_UP, NULL); + sb_notifier_call_chain(EVENT_REQUEST_WAKE_UP, NULL); reinit_completion(&ipc_dev->write_done); goto retry_write_done; } diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c index 1f5cdbe162df7d69d2ffc66466905bdb0dedf0ba..2ce6e52b505bd3e9fed209b58eae5319152f654a 100644 --- a/drivers/usb/gadget/legacy/audio.c +++ b/drivers/usb/gadget/legacy/audio.c @@ -303,8 +303,10 @@ static int audio_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(cdev->gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail; + } usb_otg_descriptor_init(cdev->gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c index 5ee25beb52f0d38c0bbe183bb3e3540fc06032d5..dc83e07bb242660d72d586c8f3f2009694a8bc18 100644 --- a/drivers/usb/gadget/legacy/cdc2.c +++ b/drivers/usb/gadget/legacy/cdc2.c @@ -183,8 +183,10 @@ static int cdc_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail1; + } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 5c28bee327e15440210eb33cfb427f7a1fd659b3..e431a8bc3a9d2228e5d11ba09db054418b2643f3 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1364,7 +1364,6 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) req->buf = dev->rbuf; req->context = NULL; - value = -EOPNOTSUPP; switch (ctrl->bRequest) { case USB_REQ_GET_DESCRIPTOR: @@ -1788,7 +1787,7 @@ static ssize_t dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) { struct dev_data *dev = fd->private_data; - ssize_t value = len, length = len; + ssize_t value, length = len; unsigned total; u32 tag; char *kbuf; diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c index 2fb4a847dd5262a9fe3c0c2a291abc3933faa6b4..5448cf4ff5889b36a1efcd6b057faf3f556d01ca 100644 --- a/drivers/usb/gadget/legacy/ncm.c +++ b/drivers/usb/gadget/legacy/ncm.c @@ -162,8 +162,10 @@ static int gncm_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail; + } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index 76406343fbe577d2d8e9fafe03c72a27dba8a051..e01e366d89cd585cff3252c88e87cbe5691149d7 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -81,6 +82,7 @@ static int raw_event_queue_add(struct raw_event_queue *queue, static struct usb_raw_event *raw_event_queue_fetch( 
struct raw_event_queue *queue) { + int ret; unsigned long flags; struct usb_raw_event *event; @@ -89,11 +91,18 @@ static struct usb_raw_event *raw_event_queue_fetch( * there's at least one event queued by decrementing the semaphore, * and then take the lock to protect queue struct fields. */ - if (down_interruptible(&queue->sema)) - return NULL; + ret = down_interruptible(&queue->sema); + if (ret) + return ERR_PTR(ret); spin_lock_irqsave(&queue->lock, flags); - if (WARN_ON(!queue->size)) - return NULL; + /* + * queue->size must have the same value as queue->sema counter (before + * the down_interruptible() call above), so this check is a fail-safe. + */ + if (WARN_ON(!queue->size)) { + spin_unlock_irqrestore(&queue->lock, flags); + return ERR_PTR(-ENODEV); + } event = queue->events[0]; queue->size--; memmove(&queue->events[0], &queue->events[1], @@ -115,8 +124,6 @@ static void raw_event_queue_destroy(struct raw_event_queue *queue) struct raw_dev; -#define USB_RAW_MAX_ENDPOINTS 32 - enum ep_state { STATE_EP_DISABLED, STATE_EP_ENABLED, @@ -126,6 +133,7 @@ struct raw_ep { struct raw_dev *dev; enum ep_state state; struct usb_ep *ep; + u8 addr; struct usb_request *req; bool urb_queued; bool disabling; @@ -160,7 +168,8 @@ struct raw_dev { bool ep0_out_pending; bool ep0_urb_queued; ssize_t ep0_status; - struct raw_ep eps[USB_RAW_MAX_ENDPOINTS]; + struct raw_ep eps[USB_RAW_EPS_NUM_MAX]; + int eps_num; struct completion ep0_done; struct raw_event_queue queue; @@ -194,8 +203,8 @@ static void dev_free(struct kref *kref) usb_ep_free_request(dev->gadget->ep0, dev->req); } raw_event_queue_destroy(&dev->queue); - for (i = 0; i < USB_RAW_MAX_ENDPOINTS; i++) { - if (dev->eps[i].state != STATE_EP_ENABLED) + for (i = 0; i < dev->eps_num; i++) { + if (dev->eps[i].state == STATE_EP_DISABLED) continue; usb_ep_disable(dev->eps[i].ep); usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req); @@ -241,12 +250,26 @@ static void gadget_ep0_complete(struct usb_ep *ep, struct usb_request *req) complete(&dev->ep0_done); } +static u8 get_ep_addr(const char *name) +{ + /* If the endpoint has fixed function (named as e.g. "ep12out-bulk"), + * parse the endpoint address from its name. We deliberately use + * deprecated simple_strtoul() function here, as the number isn't + * followed by '\0' nor '\n'. + */ + if (isdigit(name[2])) + return simple_strtoul(&name[2], NULL, 10); + /* Otherwise the endpoint is configurable (named as e.g. "ep-a"). */ + return USB_RAW_EP_ADDR_ANY; +} + static int gadget_bind(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { - int ret = 0; + int ret = 0, i = 0; struct raw_dev *dev = container_of(driver, struct raw_dev, driver); struct usb_request *req; + struct usb_ep *ep; unsigned long flags; if (strcmp(gadget->name, dev->udc_name) != 0) @@ -265,6 +288,13 @@ static int gadget_bind(struct usb_gadget *gadget, dev->req->context = dev; dev->req->complete = gadget_ep0_complete; dev->gadget = gadget; + gadget_for_each_ep(ep, dev->gadget) { + dev->eps[i].ep = ep; + dev->eps[i].addr = get_ep_addr(ep->name); + dev->eps[i].state = STATE_EP_DISABLED; + i++; + } + dev->eps_num = i; spin_unlock_irqrestore(&dev->lock, flags); /* Matches kref_put() in gadget_unbind(). 
*/ @@ -392,9 +422,8 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) char *udc_device_name; unsigned long flags; - ret = copy_from_user(&arg, (void __user *)value, sizeof(arg)); - if (ret) - return ret; + if (copy_from_user(&arg, (void __user *)value, sizeof(arg))) + return -EFAULT; switch (arg.speed) { case USB_SPEED_UNKNOWN: @@ -501,15 +530,13 @@ static int raw_ioctl_run(struct raw_dev *dev, unsigned long value) static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value) { - int ret = 0; struct usb_raw_event arg; unsigned long flags; struct usb_raw_event *event; uint32_t length; - ret = copy_from_user(&arg, (void __user *)value, sizeof(arg)); - if (ret) - return ret; + if (copy_from_user(&arg, (void __user *)value, sizeof(arg))) + return -EFAULT; spin_lock_irqsave(&dev->lock, flags); if (dev->state != STATE_DEV_RUNNING) { @@ -525,26 +552,32 @@ static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value) spin_unlock_irqrestore(&dev->lock, flags); event = raw_event_queue_fetch(&dev->queue); - if (!event) { + if (PTR_ERR(event) == -EINTR) { dev_dbg(&dev->gadget->dev, "event fetching interrupted\n"); return -EINTR; } + if (IS_ERR(event)) { + dev_err(&dev->gadget->dev, "failed to fetch event\n"); + spin_lock_irqsave(&dev->lock, flags); + dev->state = STATE_DEV_FAILED; + spin_unlock_irqrestore(&dev->lock, flags); + return -ENODEV; + } length = min(arg.length, event->length); - ret = copy_to_user((void __user *)value, event, - sizeof(*event) + length); - return ret; + if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) + return -EFAULT; + + return 0; } static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr, bool get_from_user) { - int ret; void *data; - ret = copy_from_user(io, ptr, sizeof(*io)); - if (ret) - return ERR_PTR(ret); - if (io->ep >= USB_RAW_MAX_ENDPOINTS) + if (copy_from_user(io, ptr, sizeof(*io))) + return ERR_PTR(-EFAULT); + if (io->ep >= USB_RAW_EPS_NUM_MAX) return ERR_PTR(-EINVAL); if (!usb_raw_io_flags_valid(io->flags)) return ERR_PTR(-EINVAL); @@ -658,42 +691,61 @@ static int raw_ioctl_ep0_read(struct raw_dev *dev, unsigned long value) if (IS_ERR(data)) return PTR_ERR(data); ret = raw_process_ep0_io(dev, &io, data, false); - if (ret < 0) { - kfree(data); - return ret; - } + if (ret < 0) + goto free; + length = min(io.length, (unsigned int)ret); - ret = copy_to_user((void __user *)(value + sizeof(io)), data, length); + if (copy_to_user((void __user *)(value + sizeof(io)), data, length)) + ret = -EFAULT; + else + ret = length; +free: kfree(data); return ret; } -static bool check_ep_caps(struct usb_ep *ep, - struct usb_endpoint_descriptor *desc) +static int raw_ioctl_ep0_stall(struct raw_dev *dev, unsigned long value) { - switch (usb_endpoint_type(desc)) { - case USB_ENDPOINT_XFER_ISOC: - if (!ep->caps.type_iso) - return false; - break; - case USB_ENDPOINT_XFER_BULK: - if (!ep->caps.type_bulk) - return false; - break; - case USB_ENDPOINT_XFER_INT: - if (!ep->caps.type_int) - return false; - break; - default: - return false; + int ret = 0; + unsigned long flags; + + if (value) + return -EINVAL; + spin_lock_irqsave(&dev->lock, flags); + if (dev->state != STATE_DEV_RUNNING) { + dev_dbg(dev->dev, "fail, device is not running\n"); + ret = -EINVAL; + goto out_unlock; + } + if (!dev->gadget) { + dev_dbg(dev->dev, "fail, gadget is not bound\n"); + ret = -EBUSY; + goto out_unlock; + } + if (dev->ep0_urb_queued) { + dev_dbg(&dev->gadget->dev, "fail, urb already queued\n"); + ret = -EBUSY; + goto 
out_unlock; } + if (!dev->ep0_in_pending && !dev->ep0_out_pending) { + dev_dbg(&dev->gadget->dev, "fail, no request pending\n"); + ret = -EBUSY; + goto out_unlock; + } + + ret = usb_ep_set_halt(dev->gadget->ep0); + if (ret < 0) + dev_err(&dev->gadget->dev, + "fail, usb_ep_set_halt returned %d\n", ret); - if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in) - return false; - if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out) - return false; + if (dev->ep0_in_pending) + dev->ep0_in_pending = false; + else + dev->ep0_out_pending = false; - return true; +out_unlock: + spin_unlock_irqrestore(&dev->lock, flags); + return ret; } static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value) @@ -701,7 +753,7 @@ static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value) int ret = 0, i; unsigned long flags; struct usb_endpoint_descriptor *desc; - struct usb_ep *ep = NULL; + struct raw_ep *ep; desc = memdup_user((void __user *)value, sizeof(*desc)); if (IS_ERR(desc)) @@ -729,41 +781,32 @@ static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value) goto out_free; } - for (i = 0; i < USB_RAW_MAX_ENDPOINTS; i++) { - if (dev->eps[i].state == STATE_EP_ENABLED) + for (i = 0; i < dev->eps_num; i++) { + ep = &dev->eps[i]; + if (ep->state != STATE_EP_DISABLED) continue; - break; - } - if (i == USB_RAW_MAX_ENDPOINTS) { - dev_dbg(&dev->gadget->dev, - "fail, no device endpoints available\n"); - ret = -EBUSY; - goto out_free; - } - - gadget_for_each_ep(ep, dev->gadget) { - if (ep->enabled) + if (ep->addr != usb_endpoint_num(desc) && + ep->addr != USB_RAW_EP_ADDR_ANY) continue; - if (!check_ep_caps(ep, desc)) + if (!usb_gadget_ep_match_desc(dev->gadget, ep->ep, desc, NULL)) continue; - ep->desc = desc; - ret = usb_ep_enable(ep); + ep->ep->desc = desc; + ret = usb_ep_enable(ep->ep); if (ret < 0) { dev_err(&dev->gadget->dev, "fail, usb_ep_enable returned %d\n", ret); goto out_free; } - dev->eps[i].req = usb_ep_alloc_request(ep, GFP_ATOMIC); - if (!dev->eps[i].req) { + ep->req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC); + if (!ep->req) { dev_err(&dev->gadget->dev, "fail, usb_ep_alloc_request failed\n"); - usb_ep_disable(ep); + usb_ep_disable(ep->ep); ret = -ENOMEM; goto out_free; } - dev->eps[i].ep = ep; - dev->eps[i].state = STATE_EP_ENABLED; - ep->driver_data = &dev->eps[i]; + ep->state = STATE_EP_ENABLED; + ep->ep->driver_data = ep; ret = i; goto out_unlock; } @@ -782,10 +825,6 @@ static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value) { int ret = 0, i = value; unsigned long flags; - const void *desc; - - if (i < 0 || i >= USB_RAW_MAX_ENDPOINTS) - return -EINVAL; spin_lock_irqsave(&dev->lock, flags); if (dev->state != STATE_DEV_RUNNING) { @@ -798,7 +837,12 @@ static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value) ret = -EBUSY; goto out_unlock; } - if (dev->eps[i].state != STATE_EP_ENABLED) { + if (i < 0 || i >= dev->eps_num) { + dev_dbg(dev->dev, "fail, invalid endpoint\n"); + ret = -EBUSY; + goto out_unlock; + } + if (dev->eps[i].state == STATE_EP_DISABLED) { dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n"); ret = -EINVAL; goto out_unlock; @@ -822,10 +866,8 @@ static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value) spin_lock_irqsave(&dev->lock, flags); usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req); - desc = dev->eps[i].ep->desc; - dev->eps[i].ep = NULL; + kfree(dev->eps[i].ep->desc); dev->eps[i].state = STATE_EP_DISABLED; - kfree(desc); dev->eps[i].disabling = false; out_unlock: @@ -833,6 +875,74 @@ 
static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value) return ret; } +static int raw_ioctl_ep_set_clear_halt_wedge(struct raw_dev *dev, + unsigned long value, bool set, bool halt) +{ + int ret = 0, i = value; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + if (dev->state != STATE_DEV_RUNNING) { + dev_dbg(dev->dev, "fail, device is not running\n"); + ret = -EINVAL; + goto out_unlock; + } + if (!dev->gadget) { + dev_dbg(dev->dev, "fail, gadget is not bound\n"); + ret = -EBUSY; + goto out_unlock; + } + if (i < 0 || i >= dev->eps_num) { + dev_dbg(dev->dev, "fail, invalid endpoint\n"); + ret = -EBUSY; + goto out_unlock; + } + if (dev->eps[i].state == STATE_EP_DISABLED) { + dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n"); + ret = -EINVAL; + goto out_unlock; + } + if (dev->eps[i].disabling) { + dev_dbg(&dev->gadget->dev, + "fail, disable is in progress\n"); + ret = -EINVAL; + goto out_unlock; + } + if (dev->eps[i].urb_queued) { + dev_dbg(&dev->gadget->dev, + "fail, waiting for urb completion\n"); + ret = -EINVAL; + goto out_unlock; + } + if (usb_endpoint_xfer_isoc(dev->eps[i].ep->desc)) { + dev_dbg(&dev->gadget->dev, + "fail, can't halt/wedge ISO endpoint\n"); + ret = -EINVAL; + goto out_unlock; + } + + if (set && halt) { + ret = usb_ep_set_halt(dev->eps[i].ep); + if (ret < 0) + dev_err(&dev->gadget->dev, + "fail, usb_ep_set_halt returned %d\n", ret); + } else if (!set && halt) { + ret = usb_ep_clear_halt(dev->eps[i].ep); + if (ret < 0) + dev_err(&dev->gadget->dev, + "fail, usb_ep_clear_halt returned %d\n", ret); + } else if (set && !halt) { + ret = usb_ep_set_wedge(dev->eps[i].ep); + if (ret < 0) + dev_err(&dev->gadget->dev, + "fail, usb_ep_set_wedge returned %d\n", ret); + } + +out_unlock: + spin_unlock_irqrestore(&dev->lock, flags); + return ret; +} + static void gadget_ep_complete(struct usb_ep *ep, struct usb_request *req) { struct raw_ep *r_ep = (struct raw_ep *)ep->driver_data; @@ -854,7 +964,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io, { int ret = 0; unsigned long flags; - struct raw_ep *ep = &dev->eps[io->ep]; + struct raw_ep *ep; DECLARE_COMPLETION_ONSTACK(done); spin_lock_irqsave(&dev->lock, flags); @@ -868,6 +978,12 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io, ret = -EBUSY; goto out_unlock; } + if (io->ep >= dev->eps_num) { + dev_dbg(&dev->gadget->dev, "fail, invalid endpoint\n"); + ret = -EINVAL; + goto out_unlock; + } + ep = &dev->eps[io->ep]; if (ep->state != STATE_EP_ENABLED) { dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n"); ret = -EBUSY; @@ -952,12 +1068,15 @@ static int raw_ioctl_ep_read(struct raw_dev *dev, unsigned long value) if (IS_ERR(data)) return PTR_ERR(data); ret = raw_process_ep_io(dev, &io, data, false); - if (ret < 0) { - kfree(data); - return ret; - } + if (ret < 0) + goto free; + length = min(io.length, (unsigned int)ret); - ret = copy_to_user((void __user *)(value + sizeof(io)), data, length); + if (copy_to_user((void __user *)(value + sizeof(io)), data, length)) + ret = -EFAULT; + else + ret = length; +free: kfree(data); return ret; } @@ -1010,6 +1129,71 @@ static int raw_ioctl_vbus_draw(struct raw_dev *dev, unsigned long value) return ret; } +static void fill_ep_caps(struct usb_ep_caps *caps, + struct usb_raw_ep_caps *raw_caps) +{ + raw_caps->type_control = caps->type_control; + raw_caps->type_iso = caps->type_iso; + raw_caps->type_bulk = caps->type_bulk; + raw_caps->type_int = caps->type_int; + raw_caps->dir_in = caps->dir_in; 
+ raw_caps->dir_out = caps->dir_out; +} + +static void fill_ep_limits(struct usb_ep *ep, struct usb_raw_ep_limits *limits) +{ + limits->maxpacket_limit = ep->maxpacket_limit; + limits->max_streams = ep->max_streams; +} + +static int raw_ioctl_eps_info(struct raw_dev *dev, unsigned long value) +{ + int ret = 0, i; + unsigned long flags; + struct usb_raw_eps_info *info; + struct raw_ep *ep; + + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto out; + } + + spin_lock_irqsave(&dev->lock, flags); + if (dev->state != STATE_DEV_RUNNING) { + dev_dbg(dev->dev, "fail, device is not running\n"); + ret = -EINVAL; + spin_unlock_irqrestore(&dev->lock, flags); + goto out_free; + } + if (!dev->gadget) { + dev_dbg(dev->dev, "fail, gadget is not bound\n"); + ret = -EBUSY; + spin_unlock_irqrestore(&dev->lock, flags); + goto out_free; + } + + memset(info, 0, sizeof(*info)); + for (i = 0; i < dev->eps_num; i++) { + ep = &dev->eps[i]; + strscpy(&info->eps[i].name[0], ep->ep->name, + USB_RAW_EP_NAME_MAX); + info->eps[i].addr = ep->addr; + fill_ep_caps(&ep->ep->caps, &info->eps[i].caps); + fill_ep_limits(ep->ep, &info->eps[i].limits); + } + ret = dev->eps_num; + spin_unlock_irqrestore(&dev->lock, flags); + + if (copy_to_user((void __user *)value, info, sizeof(*info))) + ret = -EFAULT; + +out_free: + kfree(info); +out: + return ret; +} + static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value) { struct raw_dev *dev = fd->private_data; @@ -1052,6 +1236,24 @@ static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value) case USB_RAW_IOCTL_VBUS_DRAW: ret = raw_ioctl_vbus_draw(dev, value); break; + case USB_RAW_IOCTL_EPS_INFO: + ret = raw_ioctl_eps_info(dev, value); + break; + case USB_RAW_IOCTL_EP0_STALL: + ret = raw_ioctl_ep0_stall(dev, value); + break; + case USB_RAW_IOCTL_EP_SET_HALT: + ret = raw_ioctl_ep_set_clear_halt_wedge( + dev, value, true, true); + break; + case USB_RAW_IOCTL_EP_CLEAR_HALT: + ret = raw_ioctl_ep_set_clear_halt_wedge( + dev, value, false, true); + break; + case USB_RAW_IOCTL_EP_SET_WEDGE: + ret = raw_ioctl_ep_set_clear_halt_wedge( + dev, value, true, false); + break; default: ret = -EINVAL; } diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 39676824a2c6fffdc4952318f8cdb4074d0a8cba..8540e52c28a97fac6b62291f10c1d2add8aed70b 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -912,7 +912,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) u32 status; DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", - ep->ep.name, req); + ep->ep.name, _req); spin_lock_irqsave(&udc->lock, flags); diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 8ee76524a0b7800e42030123094e7045f9aa56c1..cc555f3944314a4acdbab26ed860a13bd9d3304c 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -23,6 +23,8 @@ * * Having this all in one kernel can help some stages of development, * bypassing some hardware (and driver) issues. UML could help too. + * + * Note: The emulation does not include isochronous transfers! 
*/ #include @@ -138,6 +140,9 @@ static const struct { .caps = _caps, \ } +/* we don't provide isochronous endpoints since we don't support them */ +#define TYPE_BULK_OR_INT (USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT) + /* everyone has ep0 */ EP_INFO(ep0name, USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)), @@ -146,64 +151,72 @@ static const struct { USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)), EP_INFO("ep2out-bulk", USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)), +/* EP_INFO("ep3in-iso", USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)), EP_INFO("ep4out-iso", USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)), +*/ EP_INFO("ep5in-int", USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)), EP_INFO("ep6in-bulk", USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)), EP_INFO("ep7out-bulk", USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)), +/* EP_INFO("ep8in-iso", USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)), EP_INFO("ep9out-iso", USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)), +*/ EP_INFO("ep10in-int", USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)), EP_INFO("ep11in-bulk", USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)), EP_INFO("ep12out-bulk", USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)), +/* EP_INFO("ep13in-iso", USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)), EP_INFO("ep14out-iso", USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)), +*/ EP_INFO("ep15in-int", USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)), + /* or like sa1100: two fixed function endpoints */ EP_INFO("ep1out-bulk", USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)), EP_INFO("ep2in-bulk", USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)), + /* and now some generic EPs so we have enough in multi config */ - EP_INFO("ep3out", - USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)), - EP_INFO("ep4in", - USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)), - EP_INFO("ep5out", - USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)), - EP_INFO("ep6out", - USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)), - EP_INFO("ep7in", - USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)), - EP_INFO("ep8out", - USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)), - EP_INFO("ep9in", - USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)), - EP_INFO("ep10out", - USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)), - EP_INFO("ep11out", - USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)), - EP_INFO("ep12in", - USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)), - EP_INFO("ep13out", - USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)), - EP_INFO("ep14in", - USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)), - EP_INFO("ep15out", - USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)), + EP_INFO("ep-aout", + USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)), + EP_INFO("ep-bin", + USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)), + EP_INFO("ep-cout", + USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)), + EP_INFO("ep-dout", + USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)), + EP_INFO("ep-ein", + USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)), + EP_INFO("ep-fout", + USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)), + EP_INFO("ep-gin", + USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)), + EP_INFO("ep-hout", + USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)), + EP_INFO("ep-iout", + USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)), + EP_INFO("ep-jin", 
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)), + EP_INFO("ep-kout", + USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)), + EP_INFO("ep-lin", + USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)), + EP_INFO("ep-mout", + USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)), #undef EP_INFO }; @@ -1938,13 +1951,17 @@ static void dummy_timer(unsigned long _dum_hcd) limit = total; switch (usb_pipetype(urb->pipe)) { case PIPE_ISOCHRONOUS: - /* FIXME is it urb->interval since the last xfer? - * use urb->iso_frame_desc[i]. - * complete whether or not ep has requests queued. - * report random errors, to debug drivers. + /* + * We don't support isochronous. But if we did, + * here are some of the issues we'd have to face: + * + * Is it urb->interval since the last xfer? + * Use urb->iso_frame_desc[i]. + * Complete whether or not ep has requests queued. + * Report random errors, to debug drivers. */ limit = max(limit, periodic_bytes(dum, ep)); - status = -ENOSYS; + status = -EINVAL; /* fail all xfers */ break; case PIPE_INTERRUPT: diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index feb73a1c42ef9ca554c3a5136045b37411a7eda5..be094f4e116b14380b5579a2aa00ef30d4c99838 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -2000,9 +2000,12 @@ static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit) if (num == 0) { _req = gr_alloc_request(&ep->ep, GFP_ATOMIC); + if (!_req) + return -ENOMEM; + buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC); - if (!_req || !buf) { - /* possible _req freed by gr_probe via gr_remove */ + if (!buf) { + gr_free_request(&ep->ep, _req); return -ENOMEM; } diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index ac2aa04ca657363400095aa9002588406938e694..7107931617953543bf251cfc2911b1490afe242d 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -1615,17 +1615,17 @@ static int lpc32xx_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); - struct lpc32xx_udc *udc = ep->udc; + struct lpc32xx_udc *udc; u16 maxpacket; u32 tmp; unsigned long flags; /* Verify EP data */ if ((!_ep) || (!ep) || (!desc) || - (desc->bDescriptorType != USB_DT_ENDPOINT)) { - dev_dbg(udc->dev, "bad ep or descriptor\n"); + (desc->bDescriptorType != USB_DT_ENDPOINT)) return -EINVAL; - } + + udc = ep->udc; maxpacket = usb_endpoint_maxp(desc); if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) { dev_dbg(udc->dev, "bad ep descriptor's packet size\n"); @@ -1873,7 +1873,7 @@ static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value) { struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); - struct lpc32xx_udc *udc = ep->udc; + struct lpc32xx_udc *udc; unsigned long flags; if ((!ep) || (ep->hwep_num <= 1)) @@ -1883,6 +1883,7 @@ static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value) if (ep->is_in) return -EAGAIN; + udc = ep->udc; spin_lock_irqsave(&udc->lock, flags); if (value == 1) { diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c index 46ce7bc15f2b087b180d54eab2351f34aea79574..53abad98af6d86597c91604a69cb7c147d500fa7 100644 --- a/drivers/usb/gadget/udc/m66592-udc.c +++ b/drivers/usb/gadget/udc/m66592-udc.c @@ -1672,7 +1672,7 @@ static int m66592_probe(struct platform_device *pdev) err_add_udc: 
m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); - + m66592->ep0_req = NULL; clean_up3: if (m66592->pdata->on_chip) { clk_disable(m66592->clk); diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index 4103bf7cf52abb30586754abfd2242f7a62a0cf6..62fad60d0c061754b0db29cb27c7a4e11fc80a1f 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -2317,7 +2317,8 @@ static int mv_udc_probe(struct platform_device *pdev) return 0; err_create_workqueue: - destroy_workqueue(udc->qwork); + if (udc->qwork) + destroy_workqueue(udc->qwork); err_destroy_dma: dma_pool_destroy(udc->dtd_pool); err_free_dma: diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 7fb31a3b53e6e960d5689640239144049ddc712c..89476090c1791cf9b42d5ae78a108b43d5bfed14 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -2666,6 +2666,8 @@ net2272_plat_probe(struct platform_device *pdev) err_req: release_mem_region(base, len); err: + kfree(dev); + return ret; } diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c index 394abd5d65c0236ed654461b2631be8c87549788..cf12ca567e692af161f8ea6ddafc38ff43ad2f88 100644 --- a/drivers/usb/gadget/udc/s3c2410_udc.c +++ b/drivers/usb/gadget/udc/s3c2410_udc.c @@ -268,10 +268,6 @@ static void s3c2410_udc_done(struct s3c2410_ep *ep, static void s3c2410_udc_nuke(struct s3c2410_udc *udc, struct s3c2410_ep *ep, int status) { - /* Sanity check */ - if (&ep->queue == NULL) - return; - while (!list_empty(&ep->queue)) { struct s3c2410_request *req; req = list_entry(ep->queue.next, struct s3c2410_request, diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c index 26b641100639aa30aabe098c8155d50ef3a40a25..be72a625dc21753c1c3a970d0c37425a3c1d3b87 100644 --- a/drivers/usb/host/ehci-exynos.c +++ b/drivers/usb/host/ehci-exynos.c @@ -199,9 +199,8 @@ static int exynos_ehci_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(res); irq = platform_get_irq(pdev, 0); - if (!irq) { - dev_err(&pdev->dev, "Failed to get IRQ\n"); - err = -ENODEV; + if (irq < 0) { + err = irq; goto fail_io; } diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c index c7a9b31eeaeff03a6c422d47c476fda4b03e5cb0..637079a350032307dbea12018f1df59334bacfe5 100644 --- a/drivers/usb/host/ehci-mxc.c +++ b/drivers/usb/host/ehci-mxc.c @@ -63,6 +63,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev)); if (!hcd) diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 93326974ff4b3ac3ec413ceda9d6cfa54cca0046..265c9af1d2b5bbb56bf84b4bdb9b3fc83c89bfb1 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -229,6 +229,13 @@ static int ehci_pci_setup(struct usb_hcd *hcd) ehci_info(ehci, "applying MosChip frame-index workaround\n"); ehci->frame_index_bug = 1; break; + case PCI_VENDOR_ID_HUAWEI: + /* Synopsys HC bug */ + if (pdev->device == 0xa239) { + ehci_info(ehci, "applying Synopsys HC workaround\n"); + ehci->has_synopsys_hc_bug = 1; + } + break; } /* optional debug port, normally in the first BAR */ diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c index d4e0f7cd96fa4454276dbb55f71aa6b051691220..b5592fb518e3558dce6680d6df4bc93e5a332ec6 100644 --- a/drivers/usb/host/ohci-sm501.c +++ b/drivers/usb/host/ohci-sm501.c @@ 
-195,6 +195,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev) struct resource *mem; usb_remove_hcd(hcd); + iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); dma_release_declared_memory(&pdev->dev); diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index defaf950e63142ea2d1eb6627ac696ebd5ca5786..7120dbfaacaba38513662d178b69bf641ce125e8 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -284,6 +284,10 @@ static bool need_bw_sch(struct usb_host_endpoint *ep, if (is_fs_or_ls(speed) && !has_tt) return false; + /* skip endpoint with zero maxpkt */ + if (usb_endpoint_maxp(&ep->desc) == 0) + return false; + return true; } diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 35aecbcac6f7258005ebad8b43e4fde4d47df561..945e108cffb8d31d156b57804bde45e800ee4f95 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -726,6 +726,9 @@ static int xhci_mtk_remove(struct platform_device *dev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct usb_hcd *shared_hcd = xhci->shared_hcd; + pm_runtime_put_noidle(&dev->dev); + pm_runtime_disable(&dev->dev); + usb_remove_hcd(shared_hcd); xhci->shared_hcd = NULL; xhci_mtk_phy_power_off(mtk); @@ -738,8 +741,6 @@ static int xhci_mtk_remove(struct platform_device *dev) xhci_mtk_sch_exit(mtk); xhci_mtk_clks_disable(mtk); xhci_mtk_ldos_disable(mtk); - pm_runtime_put_sync(&dev->dev); - pm_runtime_disable(&dev->dev); return 0; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index c01a0d1e8b5ccb40b7b06a65fc96b7a93c2de90f..d4e29039305b81113b721f8924d961639e49c3d9 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -235,6 +235,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1142) xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + pdev->device == 0x2142) + xhci->quirks |= XHCI_NO_64BIT_SUPPORT; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 6102ac50d753e96d0aa65280421687bcec31d227..fc9947c7c0225578f3f299e1c0b852a4ca38b9a2 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -430,6 +430,7 @@ static int xhci_plat_remove(struct platform_device *dev) struct clk *clk = xhci->clk; struct usb_hcd *shared_hcd = xhci->shared_hcd; + pm_runtime_get_sync(&dev->dev); xhci->xhc_state |= XHCI_STATE_REMOVING; device_remove_file(&dev->dev, &dev_attr_config_imod); @@ -444,8 +445,9 @@ static int xhci_plat_remove(struct platform_device *dev) clk_disable_unprepare(clk); usb_put_hcd(hcd); - pm_runtime_set_suspended(&dev->dev); pm_runtime_disable(&dev->dev); + pm_runtime_put_noidle(&dev->dev); + pm_runtime_set_suspended(&dev->dev); return 0; } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 0ece43c0cf112e0adf20280cc0249852956505b1..f0139af84c95eda5a39ada6dd05a5c2c04f8bc1d 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -3403,8 +3403,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* New sg entry */ --num_sgs; sent_len -= block_len; - if (num_sgs != 0) { - sg = sg_next(sg); + sg = sg_next(sg); + if (num_sgs != 0 && sg) { block_len = sg_dma_len(sg); addr = (u64) sg_dma_address(sg); addr += sent_len; diff --git 
a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 67adfc973dc2d462c534d1a504064273d80c09de..b713f9e7768b769c476c0e4651aa187f326da1f1 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1381,6 +1381,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, xhci->devs[slot_id]->out_ctx, ep_index); ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); + ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */ ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); @@ -4260,6 +4261,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, mutex_lock(hcd->bandwidth_mutex); xhci_change_max_exit_latency(xhci, udev, 0); mutex_unlock(hcd->bandwidth_mutex); + readl_poll_timeout(port_array[port_num], pm_val, + (pm_val & PORT_PLS_MASK) == XDEV_U0, + 100, 10000); return 0; } } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 76c839717f16d3375137fc541d0dae90a2badae9..2d7706d0f54db260d1438ebc17e999a6836a76cd 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -718,7 +718,7 @@ struct xhci_ep_ctx { * 4 - TRB error * 5-7 - reserved */ -#define EP_STATE_MASK (0xf) +#define EP_STATE_MASK (0x7) #define EP_STATE_DISABLED 0 #define EP_STATE_RUNNING 1 #define EP_STATE_HALTED 2 diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index b3fc602b2e247ea150a49c13323ae66cde2b6b4b..49476c0304b272f87917e5e5a4389f94fa6f2f9c 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -2769,6 +2769,7 @@ static void usbtest_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); dev_dbg(&intf->dev, "disconnect\n"); + kfree(dev->buf); kfree(dev); } diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index dca39c9a13b0e8cfa98418847d18fe1ec5cca963..962bf792f658cae56cf3af9ece3e1805672e9ae4 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2749,6 +2749,13 @@ static int musb_resume(struct device *dev) musb_enable_interrupts(musb); musb_platform_enable(musb); + /* session might be disabled in suspend */ + if (musb->port_mode == MUSB_HOST && + !(musb->ops->quirks & MUSB_PRESERVE_SESSION)) { + devctl |= MUSB_DEVCTL_SESSION; + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + } + spin_lock_irqsave(&musb->lock, flags); error = musb_run_resume_work(musb); if (error) diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c index 952733ceaac8d47bfc544adb1fa8c787609d7790..480b2119f1e7d7debfa128d1ee1d8fe7aaf9a3eb 100644 --- a/drivers/usb/musb/musb_debugfs.c +++ b/drivers/usb/musb/musb_debugfs.c @@ -206,6 +206,11 @@ static ssize_t musb_test_mode_write(struct file *file, u8 test; char buf[24]; + memset(buf, 0x00, sizeof(buf)); + + if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + pm_runtime_get_sync(musb->controller); test = musb_readb(musb->mregs, MUSB_TESTMODE); if (test) { @@ -214,11 +219,6 @@ static ssize_t musb_test_mode_write(struct file *file, goto ret; } - memset(buf, 0x00, sizeof(buf)); - - if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) - return -EFAULT; - if (strstarts(buf, "force host full-speed")) test = MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_FS; diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c index dfc8332982f5a5b42a4f322b86afe546ceb4ac71..4dbaef61768bdbf6d60feeb40c0d255d200a2a5e 100644 --- a/drivers/usb/pd/policy_engine.c +++ b/drivers/usb/pd/policy_engine.c @@ 
-2586,6 +2586,7 @@ static void usbpd_sm(struct work_struct *w) pd->forced_pr = POWER_SUPPLY_TYPEC_PR_NONE; pd->current_state = PE_UNKNOWN; + pd_reset_protocol(pd); kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE); dual_role_instance_changed(pd->dual_role); diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c index 174dab19de2507c013a81081661e9f018f65e1b0..75d4aa92a3a90d27f582df878d9567d4b7278fc1 100644 --- a/drivers/usb/phy/phy-msm-qusb-v2.c +++ b/drivers/usb/phy/phy-msm-qusb-v2.c @@ -379,8 +379,8 @@ static void qusb_phy_get_tune1_param(struct qusb_phy *qphy) bit_mask = (bit_mask << qphy->efuse_num_of_bits) - 1; /* - * if efuse reg is updated (i.e non-zero) then use it to program - * tune parameters + * For 8nm zero is treated as a valid efuse value and driver + * should program the tune1 reg based on efuse value */ qphy->tune_val = readl_relaxed(qphy->efuse_reg); pr_debug("%s(): bit_mask:%d efuse based tune1 value:%d\n", @@ -389,10 +389,8 @@ static void qusb_phy_get_tune1_param(struct qusb_phy *qphy) qphy->tune_val = TUNE_VAL_MASK(qphy->tune_val, qphy->efuse_bit_pos, bit_mask); reg = readb_relaxed(qphy->base + qphy->phy_reg[PORT_TUNE1]); - if (qphy->tune_val) { - reg = reg & 0x0f; - reg |= (qphy->tune_val << 4); - } + reg = reg & 0x0f; + reg |= (qphy->tune_val << 4); qphy->tune_val = reg; } diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 31cd798d2dac5ca6338df392fb32487c93b7ba22..d45a3f4e752c7532d49a764154d83d48e83ee63d 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -84,6 +84,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x4348, 0x5523) }, + { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, { USB_DEVICE(0x1a86, 0x5523) }, { }, diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index d0aa4c853f56a0fd87e2f2846cada8783c94caf2..a1b5d20a56b9bc1fdddcb97c05189ff8f84bd7f9 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -63,6 +63,7 @@ static const struct usb_device_id id_table_earthmate[] = { static const struct usb_device_id id_table_cyphidcomrs232[] = { { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, + { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { } /* Terminating entry */ @@ -77,6 +78,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, + { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h index 35e223751c0e7988b254ab24a0c781a5bea8b4f3..16b7410ad0575a8d4948f39d1774f668c1c22a70 100644 --- a/drivers/usb/serial/cypress_m8.h +++ b/drivers/usb/serial/cypress_m8.h @@ -25,6 +25,9 @@ #define VENDOR_ID_CYPRESS 0x04b4 #define PRODUCT_ID_CYPHIDCOM 0x5500 +/* Simply Automated HID->COM UPB PIM (using Cypress PID 0x5500) */ +#define VENDOR_ID_SAI 0x17dd + /* FRWD Dongle - a GPS sports watch */ #define VENDOR_ID_FRWD 0x6737 #define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001 diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 
91e7e3a166a5cde158ac1b99b5f8b9f5d1f3ee10..c67a17faaa3ca4ebe73cd7263c0f57fbe3b7fca0 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -1161,8 +1161,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p, send it directly to the tty port */ if (garmin_data_p->flags & FLAGS_QUEUING) { pkt_add(garmin_data_p, data, data_length); - } else if (bulk_data || - getLayerId(data) == GARMIN_LAYERID_APPL) { + } else if (bulk_data || (data_length >= sizeof(u32) && + getLayerId(data) == GARMIN_LAYERID_APPL)) { spin_lock_irqsave(&garmin_data_p->lock, flags); garmin_data_p->flags |= APP_RESP_SEEN; diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index 18fc992a245fcd8c700aea392ed2a1fcf9d584ea..a1a11f8bb2a3dae74422d4362c130c58d988d680 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -704,14 +704,16 @@ static int iuu_uart_write(struct tty_struct *tty, struct usb_serial_port *port, struct iuu_private *priv = usb_get_serial_port_data(port); unsigned long flags; - if (count > 256) - return -ENOMEM; - spin_lock_irqsave(&priv->lock, flags); + count = min(count, 256 - priv->writelen); + if (count == 0) + goto out; + /* fill the buffer */ memcpy(priv->writebuf + priv->writelen, buf, count); priv->writelen += count; +out: spin_unlock_irqrestore(&priv->lock, flags); return count; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 3621bde2a0ed7b064ac8069d953b8be0048cfb6b..1e41240cdd43afd2b4b80dcaffdaa734ec3795bf 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -248,6 +248,7 @@ static void option_instat_callback(struct urb *urb); /* These Quectel products use Quectel's vendor ID */ #define QUECTEL_PRODUCT_EC21 0x0121 #define QUECTEL_PRODUCT_EC25 0x0125 +#define QUECTEL_PRODUCT_EG95 0x0195 #define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_EP06 0x0306 #define QUECTEL_PRODUCT_EM12 0x0512 @@ -1100,6 +1101,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) }, { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), .driver_info = RSVD(4) }, + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95), + .driver_info = RSVD(4) }, { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), @@ -1160,6 +1163,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1031, 0xff), /* Telit LE910C1-EUX */ + .driver_info = NCTRL(0) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */ + .driver_info = NCTRL(0) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0), .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1), @@ -2026,6 +2033,9 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ .driver_info = RSVD(6) }, + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ { } /* 
Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 55a8fb25ce2be2600f22679d9fef2cca8c6243ab..27b4082f4d1904e719989e5c8ffbdc4c6079ea0d 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -177,6 +177,8 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ + {DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */ + {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */ {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */ diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 95e9576c2fe6741fb01cf040edda63feae7fdbb8..4fab7ec9cd3fd624da6bf475db95a7aa0b6d2fa0 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -302,6 +302,10 @@ static void usb_wwan_indat_callback(struct urb *urb) if (status) { dev_dbg(dev, "%s: nonzero status: %d on endpoint %02x.\n", __func__, status, endpoint); + + /* don't resubmit on fatal errors */ + if (status == -ESHUTDOWN || status == -ENOENT) + return; } else { if (urb->actual_length) { tty_insert_flip_string(&port->port, data, diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 0eb8c67ee13821f34e7b9c44c55eb9f8b330c116..4df15faa66d710ad4e49fc011bc9c7b59aeb3532 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -41,6 +41,13 @@ * and don't forget to CC: the USB development list */ +/* Reported-by: Julian Groß */ +UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, + "LaCie", + "2Big Quadra USB3", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), + /* * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI * commands in UAS mode. Observed with the 1.28 firmware; are there others? diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index 802df210929ba5d52924c102867cbe55ba365963..7e474e41c85e35aaa991ce923609562c44eec679 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -113,7 +113,7 @@ struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent, "%s-%s", dev_driver_string(parent->dev), group->name); if (ret) { - kfree(type); + kobject_put(&type->kobj); return ERR_PTR(ret); } diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 423ea1f98441a27e1ac351a14f29fb10ad462e16..36bc8f104e42ee180fe5b44273a3f961c723e475 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -1464,7 +1464,12 @@ static int vfio_cap_init(struct vfio_pci_device *vdev) if (ret) return ret; - if (cap <= PCI_CAP_ID_MAX) { + /* + * ID 0 is a NULL capability, conflicting with our fake + * PCI_CAP_ID_BASIC. As it has no content, consider it + * hidden for now. 
+ */ + if (cap && cap <= PCI_CAP_ID_MAX) { len = pci_cap_length[cap]; if (len == 0xFF) { /* Variable length */ len = vfio_cap_len(vdev, cap, pos); @@ -1732,8 +1737,11 @@ void vfio_config_free(struct vfio_pci_device *vdev) vdev->vconfig = NULL; kfree(vdev->pci_config_map); vdev->pci_config_map = NULL; - kfree(vdev->msi_perm); - vdev->msi_perm = NULL; + if (vdev->msi_perm) { + free_perm_bits(vdev->msi_perm); + kfree(vdev->msi_perm); + vdev->msi_perm = NULL; + } } /* diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 834e88e20550f2c1729946447dea1ee033e41b7d..3f2f34ebf51f56746b7caca57c31d052f0106652 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -182,14 +182,14 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, break; } - vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); - added = true; - - /* Deliver to monitoring devices all correctly transmitted - * packets. + /* Deliver to monitoring devices all packets that we + * will transmit. */ virtio_transport_deliver_tap_pkt(pkt); + vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); + added = true; + pkt->off += payload_len; total_len += payload_len; diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c index 939f057836e1981223ee1ab0dbaa62288fe7b976..4cdc7a3f6dc5c616c776351f5b86f9cc27185f05 100644 --- a/drivers/video/backlight/lp855x_bl.c +++ b/drivers/video/backlight/lp855x_bl.c @@ -460,7 +460,7 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) ret = regulator_enable(lp->enable); if (ret < 0) { dev_err(lp->dev, "failed to enable vddio: %d\n", ret); - return ret; + goto disable_supply; } /* @@ -475,24 +475,34 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) ret = lp855x_configure(lp); if (ret) { dev_err(lp->dev, "device config err: %d", ret); - return ret; + goto disable_vddio; } ret = lp855x_backlight_register(lp); if (ret) { dev_err(lp->dev, "failed to register backlight. err: %d\n", ret); - return ret; + goto disable_vddio; } ret = sysfs_create_group(&lp->dev->kobj, &lp855x_attr_group); if (ret) { dev_err(lp->dev, "failed to register sysfs. 
err: %d\n", ret); - return ret; + goto disable_vddio; } backlight_update_status(lp->bl); + return 0; + +disable_vddio: + if (lp->enable) + regulator_disable(lp->enable); +disable_supply: + if (lp->supply) + regulator_disable(lp->supply); + + return ret; } static int lp855x_remove(struct i2c_client *cl) @@ -501,6 +511,8 @@ static int lp855x_remove(struct i2c_client *cl) lp->bl->props.brightness = 0; backlight_update_status(lp->bl); + if (lp->enable) + regulator_disable(lp->enable); if (lp->supply) regulator_disable(lp->supply); sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group); diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index 790900d646c03cf2c96871e9373c367a58edc83e..5dce2b10c09aca08a038f9d03b52a7dca9e82d82 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -216,7 +216,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = info->var.xoffset + rs; region.dy = 0; region.width = rw; @@ -224,7 +224,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset; region.dy = info->var.yoffset + bs; region.width = rs; diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index 37a8b0b225663780e6e28dd4ba239a6be9581071..e0b9fbe7ca9e1cc39ee8c322cec39a8dc40e7770 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -201,7 +201,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = 0; region.dy = info->var.yoffset; region.height = rw; @@ -209,7 +209,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset + bs; region.dy = 0; region.height = info->var.yres_virtual; diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index 1888f8c866e82ecc35571fe9ad35493f0feedef2..158e6ea1c0f644d03f8cde5011005af96e023f4f 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -184,7 +184,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = 0; region.dy = info->var.yoffset + rs; region.height = rw; @@ -192,7 +192,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset; region.dy = info->var.yoffset; region.height = info->var.yres; diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index f98eee263597b08a9677dc8f0072fc62a2cba022..3df26963816673a4e3f68bda1449a4989a9737ad 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -231,7 +231,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dy = 0; region.dx = info->var.xoffset; region.width = rw; @@ -239,7 +239,7 @@ static void
ud_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dy = info->var.yoffset; region.dx = info->var.xoffset; region.height = bh; diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c index c8195f95369933f9b31d47bea593d1a94789dbc6..4010a033b5974317a81ba2fbfc771fae6cb5ae8f 100644 --- a/drivers/video/fbdev/msm/mdss_dsi.c +++ b/drivers/video/fbdev/msm/mdss_dsi.c @@ -444,11 +444,62 @@ static int mdss_dsi_panel_power_lp(struct mdss_panel_data *pdata, int enable) return 0; } -static int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, +static int mdss_dsi_panel_power_ulp(struct mdss_panel_data *pdata, + int enable) +{ + int ret = 0, i; + struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; + u32 mode = enable ? DSS_REG_MODE_ULP : DSS_REG_MODE_ENABLE; + struct dsi_shared_data *sdata; + + pr_debug("%s: +\n", __func__); + if (pdata == NULL) { + pr_err("%s: Invalid input data\n", __func__); + return -EINVAL; + } + + ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata, + panel_data); + sdata = ctrl_pdata->shared_data; + + for (i = 0; i < DSI_MAX_PM; i++) { + /* + * Core power module need to be controlled along with + * DSI core clocks. + */ + if (i == DSI_CORE_PM) + continue; + if (i == DSI_PANEL_PM) + ret = msm_dss_config_vreg_opt_mode( + ctrl_pdata->panel_power_data.vreg_config, + ctrl_pdata->panel_power_data.num_vreg, mode); + else + ret = msm_dss_config_vreg_opt_mode( + sdata->power_data[i].vreg_config, + sdata->power_data[i].num_vreg, mode); + if (ret) { + pr_err("%s: failed to config ulp opt mode for %s.rc=%d\n", + __func__, __mdss_dsi_pm_name(i), ret); + break; + } + } + + if (ret) { + mode = enable ? DSS_REG_MODE_ENABLE : DSS_REG_MODE_ULP; + for (; i >= 0; i--) + msm_dss_config_vreg_opt_mode( + ctrl_pdata->power_data[i].vreg_config, + ctrl_pdata->power_data[i].num_vreg, mode); + } + return ret; +} + +int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, int power_state) { int ret = 0; struct mdss_panel_info *pinfo; + struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; if (pdata == NULL) { pr_err("%s: Invalid input data\n", __func__); @@ -456,7 +507,8 @@ static int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, } pinfo = &pdata->panel_info; - pr_debug("%s: cur_power_state=%d req_power_state=%d\n", __func__, + pr_debug("%pS->%s: cur_power_state=%d req_power_state=%d\n", + __builtin_return_address(0), __func__, pinfo->panel_power_state, power_state); if (pinfo->panel_power_state == power_state) { @@ -464,6 +516,9 @@ static int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, return 0; } + ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata, + panel_data); + /* * If a dynamic mode switch is pending, the regulators should not * be turned off or on.
@@ -480,14 +535,31 @@ static int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, ret = mdss_dsi_panel_power_off(pdata); break; case MDSS_PANEL_POWER_ON: - if (mdss_dsi_is_panel_on_lp(pdata)) + if (mdss_dsi_is_panel_on_ulp(pdata)) { + ret = mdss_dsi_panel_power_ulp(pdata, false); + goto end; + } else if (mdss_dsi_is_panel_on_lp(pdata)) { ret = mdss_dsi_panel_power_lp(pdata, false); - else + goto end; + } else { ret = mdss_dsi_panel_power_on(pdata); + } break; case MDSS_PANEL_POWER_LP1: + if (mdss_dsi_is_panel_on_ulp(pdata)) + ret = mdss_dsi_panel_power_ulp(pdata, false); + else + ret = mdss_dsi_panel_power_lp(pdata, true); + /* + * temp workaround until framework issues pertaining to LP2 + * power state transitions are fixed. For now, we internally + * transition to LP2 state whenever core power is turned off + * in LP1 state + */ + break; case MDSS_PANEL_POWER_LP2: - ret = mdss_dsi_panel_power_lp(pdata, true); + if (!ctrl_pdata->core_power) + ret = mdss_dsi_panel_power_ulp(pdata, true); break; default: pr_err("%s: unknown panel power state requested (%d)\n", @@ -497,7 +569,7 @@ static int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, if (!ret) pinfo->panel_power_state = power_state; - +end: return ret; } @@ -618,6 +690,16 @@ int mdss_dsi_get_dt_vreg_data(struct device *dev, } mp->vreg_config[i].load[DSS_REG_MODE_DISABLE] = tmp; + rc = of_property_read_u32(supply_node, + "qcom,supply-ulp-load", &tmp); + if (rc) { + pr_warn("%s: error reading ulp load. rc=%d\n", + __func__, rc); + rc = 0; + } + mp->vreg_config[i].load[DSS_REG_MODE_ULP] = (!rc ? tmp : + mp->vreg_config[i].load[DSS_REG_MODE_ENABLE]); + /* pre-sleep */ rc = of_property_read_u32(supply_node, "qcom,supply-pre-on-sleep", &tmp); diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h index da704cbe96827a900c1902345ac60c15038e6945..9193d1008005ced254424f83254f6fc2c82193a6 100644 --- a/drivers/video/fbdev/msm/mdss_dsi.h +++ b/drivers/video/fbdev/msm/mdss_dsi.h @@ -726,6 +726,7 @@ void mdss_dsi_cfg_lane_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, void mdss_dsi_set_reg(struct mdss_dsi_ctrl_pdata *ctrl, int off, u32 mask, u32 val); int mdss_dsi_phy_pll_reset_status(struct mdss_dsi_ctrl_pdata *ctrl); +int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, int power_state); int mdss_dsi_check_panel_status(struct mdss_dsi_ctrl_pdata *ctrl, void *arg); void mdss_dsi_debug_bus_init(struct mdss_dsi_data *sdata); @@ -938,6 +939,11 @@ static inline bool mdss_dsi_is_panel_on_lp(struct mdss_panel_data *pdata) return mdss_panel_is_power_on_lp(pdata->panel_info.panel_power_state); } +static inline bool mdss_dsi_is_panel_on_ulp(struct mdss_panel_data *pdata) +{ + return mdss_panel_is_power_on_ulp(pdata->panel_info.panel_power_state); +} + static inline bool mdss_dsi_ulps_feature_enabled( struct mdss_panel_data *pdata) { diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c index 223e1ee8e57786843dd906d89baa45f68ac4fda0..971f55d0817d7c53563833b9a30353d528871830 100644 --- a/drivers/video/fbdev/msm/msm_mdss_io_8974.c +++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c @@ -2698,7 +2698,8 @@ int mdss_dsi_post_clkoff_cb(void *priv, * supplies which support turning off in low power * state */ - if (ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) + if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) && + (i != DSI_CORE_PM)) if (!sdata->power_data[i].vreg_config ->lp_disable_allowed) continue; @@ -2721,6 +2722,16 @@ int mdss_dsi_post_clkoff_cb(void *priv, ctrl->core_power = 
false; } } + + /* + * temp workaround until framework issues pertaining to LP2 + * power state transitions are fixed. For now, we internally + * transition to LP2 state whenever core power is turned off + * in LP1 state + */ + if (mdss_dsi_is_panel_on_lp(pdata)) + mdss_dsi_panel_power_ctrl(pdata, + MDSS_PANEL_POWER_LP2); } return rc; } @@ -2758,6 +2769,7 @@ int mdss_dsi_pre_clkon_cb(void *priv, for (i = DSI_CORE_PM; i < DSI_MAX_PM; i++) { if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) && (!pdata->panel_info.cont_splash_enabled) && + (i != DSI_CORE_PM) && (!sdata->power_data[i].vreg_config ->lp_disable_allowed)) continue; @@ -2778,6 +2790,14 @@ int mdss_dsi_pre_clkon_cb(void *priv, } } + /* + * temp workaround until framework issues pertaining to LP2 + * power state transitions are fixed. For now, if we intend to + * send a frame update when in LP1, we have to explicitly exit + * LP2 state here + */ + if (mdss_dsi_is_panel_on_ulp(pdata)) + mdss_dsi_panel_power_ctrl(pdata, MDSS_PANEL_POWER_LP1); } /* Disable dynamic clock gating*/ diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c index d570e19a286494da5f3524d23ff7a3f2d0267cd9..ffda1d68fb057d69ac3d3fe7b25d3ea8f374f01a 100644 --- a/drivers/video/fbdev/w100fb.c +++ b/drivers/video/fbdev/w100fb.c @@ -583,6 +583,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par) memsize=par->mach->mem->size; memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize); vfree(par->saved_extmem); + par->saved_extmem = NULL; } if (par->saved_intmem) { memsize=MEM_INT_SIZE; @@ -591,6 +592,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par) else memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize); vfree(par->saved_intmem); + par->saved_intmem = NULL; } } diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index 3099052e1243379c1b96cf26e513811dcd192347..0667bc6e7d2391c11c8a1e5c8f9f9e15f37d24f1 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c @@ -176,7 +176,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) /* check irqstatus */ if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) { dev_dbg(hdq_data->dev, "timeout waiting for" - " TXCOMPLETE/RXCOMPLETE, %x", *status); + " TXCOMPLETE/RXCOMPLETE, %x\n", *status); ret = -ETIMEDOUT; goto out; } @@ -187,7 +187,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) OMAP_HDQ_FLAG_CLEAR, &tmp_status); if (ret) { dev_dbg(hdq_data->dev, "timeout waiting GO bit" - " return to zero, %x", tmp_status); + " return to zero, %x\n", tmp_status); } out: @@ -203,7 +203,7 @@ static irqreturn_t hdq_isr(int irq, void *_hdq) spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); - dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus); + dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus); if (hdq_data->hdq_irqstatus & (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE @@ -311,7 +311,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data) tmp_status = hdq_data->hdq_irqstatus; /* check irqstatus */ if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) { - dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x", + dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n", tmp_status); ret = -ETIMEDOUT; goto out; @@ -338,7 +338,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data) 
&tmp_status); if (ret) dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits" - " return to zero, %x", tmp_status); + " return to zero, %x\n", tmp_status); out: mutex_unlock(&hdq_data->hdq_mutex); diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c index 79383ff620199c20db7d97f61366a24d499d3760..1443386bb590bdaa10cf66da6a5d5c2dcdf3104f 100644 --- a/drivers/watchdog/da9062_wdt.c +++ b/drivers/watchdog/da9062_wdt.c @@ -94,11 +94,6 @@ static int da9062_wdt_update_timeout_register(struct da9062_watchdog *wdt, unsigned int regval) { struct da9062 *chip = wdt->hw; - int ret; - - ret = da9062_reset_watchdog_timer(wdt); - if (ret) - return ret; return regmap_update_bits(chip->regmap, DA9062AA_CONTROL_D, diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 52e03f1c76e383e1d70675a6f4057144af34de0b..21c3ffdc8a09d415093297b39d811dad56305c93 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -38,7 +38,6 @@ #include /* For __init/__exit/... */ #include /* For timeout functions */ #include /* For printk/panic/... */ -#include /* For data references */ #include /* For handling misc devices */ #include /* For module stuff/... */ #include /* For mutexes */ @@ -53,14 +52,14 @@ /* * struct watchdog_core_data - watchdog core internal data - * @kref: Reference count. + * @dev: The watchdog's internal device * @cdev: The watchdog's Character device. * @wdd: Pointer to watchdog device. * @lock: Lock for watchdog core. * @status: Watchdog core internal status bits. */ struct watchdog_core_data { - struct kref kref; + struct device dev; struct cdev cdev; struct watchdog_device *wdd; struct mutex lock; @@ -802,7 +801,7 @@ static int watchdog_open(struct inode *inode, struct file *file) file->private_data = wd_data; if (!hw_running) - kref_get(&wd_data->kref); + get_device(&wd_data->dev); /* dev/watchdog is a virtual (and thus non-seekable) filesystem */ return nonseekable_open(inode, file); @@ -814,11 +813,11 @@ static int watchdog_open(struct inode *inode, struct file *file) return err; } -static void watchdog_core_data_release(struct kref *kref) +static void watchdog_core_data_release(struct device *dev) { struct watchdog_core_data *wd_data; - wd_data = container_of(kref, struct watchdog_core_data, kref); + wd_data = container_of(dev, struct watchdog_core_data, dev); kfree(wd_data); } @@ -878,7 +877,7 @@ static int watchdog_release(struct inode *inode, struct file *file) */ if (!running) { module_put(wd_data->cdev.owner); - kref_put(&wd_data->kref, watchdog_core_data_release); + put_device(&wd_data->dev); } return 0; } @@ -897,17 +896,22 @@ static struct miscdevice watchdog_miscdev = { .fops = &watchdog_fops, }; +static struct class watchdog_class = { + .name = "watchdog", + .owner = THIS_MODULE, + .dev_groups = wdt_groups, +}; + /* * watchdog_cdev_register: register watchdog character device * @wdd: watchdog device - * @devno: character device number * * Register a watchdog character device including handling the legacy * /dev/watchdog node. /dev/watchdog is actually a miscdevice and * thus we set it up like that. 
*/ -static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno) +static int watchdog_cdev_register(struct watchdog_device *wdd) { struct watchdog_core_data *wd_data; int err; @@ -915,7 +919,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno) wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL); if (!wd_data) return -ENOMEM; - kref_init(&wd_data->kref); mutex_init(&wd_data->lock); wd_data->wdd = wdd; @@ -942,23 +945,33 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno) } } + device_initialize(&wd_data->dev); + wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id); + wd_data->dev.class = &watchdog_class; + wd_data->dev.parent = wdd->parent; + wd_data->dev.groups = wdd->groups; + wd_data->dev.release = watchdog_core_data_release; + dev_set_drvdata(&wd_data->dev, wdd); + dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); + /* Fill in the data structures */ cdev_init(&wd_data->cdev, &watchdog_fops); - wd_data->cdev.owner = wdd->ops->owner; /* Add the device */ - err = cdev_add(&wd_data->cdev, devno, 1); + err = cdev_device_add(&wd_data->cdev, &wd_data->dev); if (err) { pr_err("watchdog%d unable to add device %d:%d\n", wdd->id, MAJOR(watchdog_devt), wdd->id); if (wdd->id == 0) { misc_deregister(&watchdog_miscdev); old_wd_data = NULL; - kref_put(&wd_data->kref, watchdog_core_data_release); + put_device(&wd_data->dev); } return err; } + wd_data->cdev.owner = wdd->ops->owner; + /* Record time of most recent heartbeat as 'just before now'. */ wd_data->last_hw_keepalive = jiffies - 1; @@ -968,7 +981,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno) */ if (watchdog_hw_running(wdd)) { __module_get(wdd->ops->owner); - kref_get(&wd_data->kref); + get_device(&wd_data->dev); if (handle_boot_enabled) queue_delayed_work(watchdog_wq, &wd_data->work, 0); else @@ -991,7 +1004,7 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd) { struct watchdog_core_data *wd_data = wdd->wd_data; - cdev_del(&wd_data->cdev); + cdev_device_del(&wd_data->cdev, &wd_data->dev); if (wdd->id == 0) { misc_deregister(&watchdog_miscdev); old_wd_data = NULL; @@ -1009,15 +1022,9 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd) cancel_delayed_work_sync(&wd_data->work); - kref_put(&wd_data->kref, watchdog_core_data_release); + put_device(&wd_data->dev); } -static struct class watchdog_class = { - .name = "watchdog", - .owner = THIS_MODULE, - .dev_groups = wdt_groups, -}; - /* * watchdog_dev_register: register a watchdog device * @wdd: watchdog device @@ -1029,27 +1036,14 @@ static struct class watchdog_class = { int watchdog_dev_register(struct watchdog_device *wdd) { - struct device *dev; - dev_t devno; int ret; - devno = MKDEV(MAJOR(watchdog_devt), wdd->id); - - ret = watchdog_cdev_register(wdd, devno); + ret = watchdog_cdev_register(wdd); if (ret) return ret; - dev = device_create_with_groups(&watchdog_class, wdd->parent, - devno, wdd, wdd->groups, - "watchdog%d", wdd->id); - if (IS_ERR(dev)) { - watchdog_cdev_unregister(wdd); - return PTR_ERR(dev); - } - ret = watchdog_register_pretimeout(wdd); if (ret) { - device_destroy(&watchdog_class, devno); watchdog_cdev_unregister(wdd); } @@ -1067,7 +1061,6 @@ int watchdog_dev_register(struct watchdog_device *wdd) void watchdog_dev_unregister(struct watchdog_device *wdd) { watchdog_unregister_pretimeout(wdd); - device_destroy(&watchdog_class, wdd->wd_data->cdev.dev); watchdog_cdev_unregister(wdd); } diff --git a/drivers/xen/pvcalls-back.c 
b/drivers/xen/pvcalls-back.c index 58be15c27b6d6d340c065d43daf7fcd98c0ee293..62a0c4111dc4b237d28a577d1f3699595edf6653 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -1104,7 +1104,8 @@ static void set_backend_state(struct xenbus_device *dev, case XenbusStateInitialised: switch (state) { case XenbusStateConnected: - backend_connect(dev); + if (backend_connect(dev)) + return; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateClosing: diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 166846a40078a95bd18e7c141eed86fb8cf8515b..2c433c95adb5d470f92658ba0342fb9816b6caee 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1740,7 +1740,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, (!regset->active || regset->active(t->task, regset) > 0)) { int ret; size_t size = regset->n * regset->size; - void *data = kmalloc(size, GFP_KERNEL); + void *data = kzalloc(size, GFP_KERNEL); if (unlikely(!data)) return 0; ret = regset->get(t->task, regset, diff --git a/fs/block_dev.c b/fs/block_dev.c index b873e824edc12b3c380eb1d19ae77ff9b2fb8d91..a245256aea2b8fab85da3669e2a9f9a460c84869 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1439,10 +1439,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) */ if (!for_part) { ret = devcgroup_inode_permission(bdev->bd_inode, perm); - if (ret != 0) { - bdput(bdev); + if (ret != 0) return ret; - } } restart: @@ -1515,8 +1513,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) goto out_clear; BUG_ON(for_part); ret = __blkdev_get(whole, mode, 1); - if (ret) + if (ret) { + bdput(whole); goto out_clear; + } bdev->bd_contains = whole; bdev->bd_part = disk_get_part(disk, partno); if (!(disk->flags & GENHD_FL_UP) || @@ -1570,7 +1570,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) put_disk(disk); module_put(owner); out: - bdput(bdev); return ret; } @@ -1656,6 +1655,9 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) bdput(whole); } + if (res) + bdput(bdev); + return res; } EXPORT_SYMBOL(blkdev_get); diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index e4d5e6eae409a89d6aa6e17ffa34f957ec1a344e..1cf75d1032e1750a09affc17aa791597dc66bbb9 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1420,6 +1420,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans, if (ret < 0 && ret != -ENOENT) { ulist_free(tmp); ulist_free(*roots); + *roots = NULL; return ret; } node = ulist_next(tmp, &uiter); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 51e26f90f0bb28da6a064815ce9554834cc43cec..63b8812ba508aa5c40873efd9f66c2b1286b2188 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -10554,7 +10554,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; - goto out_put_group; + goto out; } /* @@ -10591,7 +10591,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, ret = btrfs_orphan_add(trans, BTRFS_I(inode)); if (ret) { btrfs_add_delayed_iput(inode); - goto out_put_group; + goto out; } clear_nlink(inode); /* One for the block groups ref */ @@ -10614,13 +10614,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); if (ret < 0) - goto out_put_group; + goto out; if (ret > 0) btrfs_release_path(path); if (ret == 0) { ret = btrfs_del_item(trans, tree_root, path); if (ret) - goto 
out_put_group; + goto out; btrfs_release_path(path); } @@ -10629,6 +10629,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, &fs_info->block_group_cache_tree); RB_CLEAR_NODE(&block_group->cache_node); + /* Once for the block groups rbtree */ + btrfs_put_block_group(block_group); + if (fs_info->first_logical_byte == block_group->key.objectid) fs_info->first_logical_byte = (u64)-1; spin_unlock(&fs_info->block_group_cache_lock); @@ -10778,10 +10781,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, ret = remove_block_group_free_space(trans, fs_info, block_group); if (ret) - goto out_put_group; - - /* Once for the block groups rbtree */ - btrfs_put_block_group(block_group); + goto out; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) @@ -10791,10 +10791,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, ret = btrfs_del_item(trans, root, path); -out_put_group: +out: /* Once for the lookup reference */ btrfs_put_block_group(block_group); -out: btrfs_free_path(path); return ret; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a485e34f2c705eac68b406b28b1f3e0ebab4af81..60e82b7899d20812cbf1ddc6cc395a8a05122468 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1721,7 +1721,8 @@ static int __process_pages_contig(struct address_space *mapping, if (!PageDirty(pages[i]) || pages[i]->mapping != mapping) { unlock_page(pages[i]); - put_page(pages[i]); + for (; i < ret; i++) + put_page(pages[i]); err = -EAGAIN; goto out; } @@ -4851,25 +4852,28 @@ struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, static void check_buffer_tree_ref(struct extent_buffer *eb) { int refs; - /* the ref bit is tricky. We have to make sure it is set - * if we have the buffer dirty. Otherwise the - * code to free a buffer can end up dropping a dirty - * page + /* + * The TREE_REF bit is first set when the extent_buffer is added + * to the radix tree. It is also reset, if unset, when a new reference + * is created by find_extent_buffer. * - * Once the ref bit is set, it won't go away while the - * buffer is dirty or in writeback, and it also won't - * go away while we have the reference count on the - * eb bumped. + * It is only cleared in two cases: freeing the last non-tree + * reference to the extent_buffer when its STALE bit is set or + * calling releasepage when the tree reference is the only reference. * - * We can't just set the ref bit without bumping the - * ref on the eb because free_extent_buffer might - * see the ref bit and try to clear it. If this happens - * free_extent_buffer might end up dropping our original - * ref by mistake and freeing the page before we are able - * to add one more ref. + * In both cases, care is taken to ensure that the extent_buffer's + * pages are not under io. However, releasepage can be concurrently + * called with creating new references, which is prone to race + * conditions between the calls to check_buffer_tree_ref in those + * codepaths and clearing TREE_REF in try_release_extent_buffer. * - * So bump the ref count first, then set the bit. If someone - * beat us to it, drop the ref we added. + * The actual lifetime of the extent_buffer in the radix tree is + * adequately protected by the refcount, but the TREE_REF bit and + * its corresponding reference are not. To protect against this + * class of races, we call check_buffer_tree_ref from the codepaths + * which trigger io after they set eb->io_pages. 
Note that once io is + * initiated, TREE_REF can no longer be cleared, so that is the + * moment at which any such race is best fixed. */ refs = atomic_read(&eb->refs); if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) @@ -5333,6 +5337,11 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); eb->read_mirror = 0; atomic_set(&eb->io_pages, num_reads); + /* + * It is possible for releasepage to clear the TREE_REF bit before we + * set io_pages. See check_buffer_tree_ref for a more detailed comment. + */ + check_buffer_tree_ref(eb); for (i = 0; i < num_pages; i++) { page = eb->pages[i]; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 717d82d51bb13c619b66c3e3d967b33166d7b77a..edd5f152e448751a9afef3ce0a6f92d7ee577b46 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -795,10 +795,12 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, nritems = btrfs_header_nritems(path->nodes[0]); if (!nritems || (path->slots[0] >= nritems - 1)) { ret = btrfs_next_leaf(root, path); - if (ret == 1) + if (ret < 0) { + goto out; + } else if (ret > 0) { found_next = 1; - if (ret != 0) goto insert; + } slot = path->slots[0]; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2a196bb134d9febda7f7d31eea396dfab4531f5b..57908ee964a20d4d1ce64108f7fc70c51586ba65 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -978,8 +978,8 @@ static noinline int cow_file_range(struct inode *inode, u64 alloc_hint = 0; u64 num_bytes; unsigned long ram_size; - u64 disk_num_bytes; u64 cur_alloc_size = 0; + u64 min_alloc_size; u64 blocksize = fs_info->sectorsize; struct btrfs_key ins; struct extent_map *em; @@ -996,7 +996,6 @@ static noinline int cow_file_range(struct inode *inode, num_bytes = ALIGN(end - start + 1, blocksize); num_bytes = max(blocksize, num_bytes); - disk_num_bytes = num_bytes; inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K); @@ -1023,17 +1022,32 @@ static noinline int cow_file_range(struct inode *inode, } } - BUG_ON(disk_num_bytes > - btrfs_super_total_bytes(fs_info->super_copy)); + BUG_ON(num_bytes > btrfs_super_total_bytes(fs_info->super_copy)); alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); btrfs_drop_extent_cache(BTRFS_I(inode), start, start + num_bytes - 1, 0); - while (disk_num_bytes > 0) { - cur_alloc_size = disk_num_bytes; + /* + * Relocation relies on the relocated extents to have exactly the same + * size as the original extents. Normally writeback for relocation data + * extents follows a NOCOW path because relocation preallocates the + * extents. However, due to an operation such as scrub turning a block + * group to RO mode, it may fallback to COW mode, so we must make sure + * an extent allocated during COW has exactly the requested size and can + * not be split into smaller extents, otherwise relocation breaks and + * fails during the stage where it updates the bytenr of file extent + * items. 
+ */ + if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) + min_alloc_size = num_bytes; + else + min_alloc_size = fs_info->sectorsize; + + while (num_bytes > 0) { + cur_alloc_size = num_bytes; ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size, - fs_info->sectorsize, 0, alloc_hint, + min_alloc_size, 0, alloc_hint, &ins, 1, 1); if (ret < 0) goto out_unlock; @@ -1097,11 +1111,10 @@ static noinline int cow_file_range(struct inode *inode, delalloc_end, locked_page, EXTENT_LOCKED | EXTENT_DELALLOC, page_ops); - if (disk_num_bytes < cur_alloc_size) - disk_num_bytes = 0; + if (num_bytes < cur_alloc_size) + num_bytes = 0; else - disk_num_bytes -= cur_alloc_size; - num_bytes -= cur_alloc_size; + num_bytes -= cur_alloc_size; alloc_hint = ins.objectid + ins.offset; start += cur_alloc_size; extent_reserved = false; @@ -1139,8 +1152,8 @@ static noinline int cow_file_range(struct inode *inode, */ if (extent_reserved) { extent_clear_unlock_delalloc(inode, start, - start + cur_alloc_size, - start + cur_alloc_size, + start + cur_alloc_size - 1, + start + cur_alloc_size - 1, locked_page, clear_bits, page_ops); @@ -8707,7 +8720,6 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) /* bio split */ ASSERT(map_length <= INT_MAX); - atomic_inc(&dip->pending_bios); do { clone_len = min_t(int, submit_len, map_length); @@ -8758,7 +8770,8 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) if (!status) return 0; - bio_put(bio); + if (bio != orig_bio) + bio_put(bio); out_err: dip->errors = 1; /* @@ -8798,7 +8811,7 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, bio->bi_private = dip; dip->orig_bio = bio; dip->dio_bio = dio_bio; - atomic_set(&dip->pending_bios, 0); + atomic_set(&dip->pending_bios, 1); io_bio = btrfs_io_bio(bio); io_bio->logical = file_offset; @@ -8947,9 +8960,6 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) dio_data.overwrite = 1; inode_unlock(inode); relock = true; - } else if (iocb->ki_flags & IOCB_NOWAIT) { - ret = -EAGAIN; - goto out; } ret = btrfs_delalloc_reserve_space(inode, &data_reserved, offset, count); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index ca15d65a2070c089ba53d61c4afca06e20121578..654ab6e57ec3278db251ae1177e2c3c6dc3e2b5f 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -35,6 +35,7 @@ #include "btrfs_inode.h" #include "transaction.h" #include "compression.h" +#include "xattr.h" /* * Maximum number of references an extent can have in order for us to attempt to @@ -4554,6 +4555,10 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key, struct fs_path *p; struct posix_acl_xattr_header dummy_acl; + /* Capabilities are emitted by finish_inode_if_needed */ + if (!strncmp(name, XATTR_NAME_CAPS, name_len)) + return 0; + p = fs_path_alloc(); if (!p) return -ENOMEM; @@ -5096,6 +5101,64 @@ static int send_extent_data(struct send_ctx *sctx, return 0; } +/* + * Search for a capability xattr related to sctx->cur_ino. If the capability is + * found, call send_set_xattr function to emit it. + * + * Return 0 if there isn't a capability, or when the capability was emitted + * successfully, or < 0 if an error occurred. 
+ */ +static int send_capabilities(struct send_ctx *sctx) +{ + struct fs_path *fspath = NULL; + struct btrfs_path *path; + struct btrfs_dir_item *di; + struct extent_buffer *leaf; + unsigned long data_ptr; + char *buf = NULL; + int buf_len; + int ret = 0; + + path = alloc_path_for_send(); + if (!path) + return -ENOMEM; + + di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino, + XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0); + if (!di) { + /* There is no xattr for this inode */ + goto out; + } else if (IS_ERR(di)) { + ret = PTR_ERR(di); + goto out; + } + + leaf = path->nodes[0]; + buf_len = btrfs_dir_data_len(leaf, di); + + fspath = fs_path_alloc(); + buf = kmalloc(buf_len, GFP_KERNEL); + if (!fspath || !buf) { + ret = -ENOMEM; + goto out; + } + + ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); + if (ret < 0) + goto out; + + data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di); + read_extent_buffer(leaf, buf, data_ptr, buf_len); + + ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS, + strlen(XATTR_NAME_CAPS), buf, buf_len); +out: + kfree(buf); + fs_path_free(fspath); + btrfs_free_path(path); + return ret; +} + static int clone_range(struct send_ctx *sctx, struct clone_root *clone_root, const u64 disk_byte, @@ -5907,6 +5970,10 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) goto out; } + ret = send_capabilities(sctx); + if (ret < 0) + goto out; + /* * If other directory inodes depended on our current directory * inode's move/rename, now do their move/rename operations. diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 6d34842912e8048b0228ad811aaa509c322ff972..4ff96e0aa26a96ab2010b6486b8767cbfad1b737 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6901,6 +6901,14 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) mutex_lock(&uuid_mutex); mutex_lock(&fs_info->chunk_mutex); + /* + * It is possible for mount and umount to race in such a way that + * we execute this code path, but open_fs_devices failed to clear + * total_rw_bytes. We certainly want it cleared before reading the + * device items, so clear it here. + */ + fs_info->fs_devices->total_rw_bytes = 0; + /* * Read all device items, and then all the chunk items. 
All * device items are found before any chunk item (their object id diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index 5e9176ec0d3ae7234adb659564c27bb3506d8893..c073a0f680fd8ca1a245b97bbca4582f5cef8371 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -64,9 +64,9 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode, object = container_of(op->op.object, struct cachefiles_object, fscache); spin_lock(&object->work_lock); list_add_tail(&monitor->op_link, &op->to_do); + fscache_enqueue_retrieval(op); spin_unlock(&object->work_lock); - fscache_enqueue_retrieval(op); fscache_put_retrieval(op); return 0; } diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 1b5a50848b5be486ce9ad76b6508b1feb3e8d267..589cfe3ed873b9cade939f4c2d31b055de6f3acd 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -3502,6 +3502,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex, WARN_ON(1); tsession = NULL; target = -1; + mutex_lock(&session->s_mutex); } goto retry; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 58e7288e5151cedef80711f28f8339d36ddfd1c6..64e52374ecc2310ba5cf91fcdc865ff1ab75ab57 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -4324,9 +4324,12 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) vol_info->retry = master_tcon->retry; vol_info->nocase = master_tcon->nocase; vol_info->local_lease = master_tcon->local_lease; + vol_info->resilient = master_tcon->use_resilient; + vol_info->persistent = master_tcon->use_persistent; vol_info->no_linux_ext = !master_tcon->unix_ext; vol_info->sectype = master_tcon->ses->sectype; vol_info->sign = master_tcon->ses->sign; + vol_info->seal = master_tcon->seal; rc = cifs_set_vol_auth(vol_info, master_tcon->ses); if (rc) { diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 662977b8d6aeada1da5b1ad126be437cd7354fac..72e7cbfb325a6e172c0b3a36ac9124d77d65896d 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3496,7 +3496,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset) * than it negotiated since it will refuse the read * then. */ - if ((tcon->ses) && !(tcon->ses->capabilities & + if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)) { current_read_size = min_t(uint, current_read_size, CIFSMaxBufSize); diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index bdce714e9448950f4fff2deca667e929128762e1..b76e7339529978ad1a55e2bf7be9abefb9187308 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -2210,6 +2210,15 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, if (rc == 0) { cifsInode->server_eof = attrs->ia_size; cifs_setsize(inode, attrs->ia_size); + + /* + * The man page of truncate says if the size changed, + * then the st_ctime and st_mtime fields for the file + * are updated. + */ + attrs->ia_ctime = attrs->ia_mtime = current_time(inode); + attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME; + cifs_truncate_page(inode->i_mapping, inode->i_size); } diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 951c444d83e7b708172b7b92c16d7a1ed9dd3fd3..b46fdb2b8d34979076eeb3856a42140cd20e29e4 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -1755,6 +1755,12 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, inode = d_inode(cfile->dentry); cifsi = CIFS_I(inode); + /* + * We zero the range through ioctl, so we need remove the page caches + * first, otherwise the data may be inconsistent with the server. 
+ */ + truncate_pagecache_range(inode, offset, offset + len - 1); + /* if file not oplocked can't be sure whether asking to extend size */ if (!CIFS_CACHE_READ(cifsi)) if (keep_size == false) { @@ -1824,6 +1830,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, return rc; } + /* + * We implement the punch hole through ioctl, so we need remove the page + * caches first, otherwise the data may be inconsistent with the server. + */ + truncate_pagecache_range(inode, offset, offset + len - 1); + cifs_dbg(FYI, "offset %lld len %lld", offset, len); fsctl_buf.FileOffset = cpu_to_le64(offset); diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index c2ef617d2f97db5154f12bf023fc6775abd3331a..c875f246cb0e94a7dfeb94ce9dfb7bc89df4d76f 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -1537,6 +1537,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) spin_lock(&configfs_dirent_lock); configfs_detach_rollback(dentry); spin_unlock(&configfs_dirent_lock); + config_item_put(parent_item); return -EINTR; } frag->frag_dead = true; diff --git a/fs/coredump.c b/fs/coredump.c index 4b15f407c1c0a600ebae8c152ad02852ffa7d231..aaadbe71bad87d5dd18539c4a68cceddef3fd624 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -758,6 +758,14 @@ void do_coredump(const siginfo_t *siginfo) if (displaced) put_files_struct(displaced); if (!dump_interrupted()) { + /* + * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would + * have this set to NULL. + */ + if (!cprm.file) { + pr_info("Core dump to |%s disabled\n", cn.corename); + goto close_fail; + } file_start_write(cprm.file); core_dumped = binfmt->core_dump(&cprm); file_end_write(cprm.file); diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index aa36d245f548ded2a2e652426e601872557d8980..b1c5175cd2380d841d2729aa64b6b696f10e7d70 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -77,7 +77,8 @@ static int fscrypt_zeroout_range_inlinecrypt(const struct inode *inode, lblk += blocks_this_page; pblk += blocks_this_page; len -= blocks_this_page; - } while (++i != BIO_MAX_PAGES && len != 0); + } while (++i != BIO_MAX_PAGES && len != 0 && + fscrypt_mergeable_bio(bio, inode, lblk)); err = submit_bio_wait(bio); if (err) diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 02ab7b76d157d3340dcee324c085d9ace7e54a8b..1ebf49a27df7be805dd17cce6da994a2e20db995 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -26,6 +26,7 @@ #include #include #include "fscrypt_private.h" +#include static unsigned int num_prealloc_crypto_pages = 32; @@ -53,6 +54,7 @@ struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags) /** * fscrypt_free_bounce_page() - free a ciphertext bounce page + * @bounce_page: the bounce page to free, or NULL * * Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(), * or by fscrypt_alloc_bounce_page() directly. 
@@ -87,9 +89,7 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, #endif memset(iv, 0, ci->ci_mode->ivsize); - if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 || - ((fscrypt_policy_contents_mode(&ci->ci_policy) == - FSCRYPT_MODE_PRIVATE) && inlinecrypt)) { + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) { WARN_ON_ONCE(lblk_num > U32_MAX); WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX); lblk_num |= (u64)ci->ci_inode->i_ino << 32; @@ -98,6 +98,16 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, lblk_num = (u32)(ci->ci_hashed_ino + lblk_num); } else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE); + } else if ((fscrypt_policy_contents_mode(&ci->ci_policy) == + FSCRYPT_MODE_PRIVATE) + && inlinecrypt) { + if (ci->ci_inode->i_sb->s_type->name) { + if (!strcmp(ci->ci_inode->i_sb->s_type->name, "f2fs")) { + WARN_ON_ONCE(lblk_num > U32_MAX); + WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX); + lblk_num |= (u64)ci->ci_inode->i_ino << 32; + } + } } iv->lblk_num = cpu_to_le64(lblk_num); } @@ -150,7 +160,8 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, } /** - * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a pagecache page + * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a + * pagecache page * @page: The locked pagecache page containing the block(s) to encrypt * @len: Total size of the block(s) to encrypt. Must be a nonzero * multiple of the filesystem's block size. @@ -240,7 +251,8 @@ int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, EXPORT_SYMBOL(fscrypt_encrypt_block_inplace); /** - * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a pagecache page + * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a + * pagecache page * @page: The locked pagecache page containing the block(s) to decrypt * @len: Total size of the block(s) to decrypt. Must be a nonzero * multiple of the filesystem's block size. @@ -364,6 +376,8 @@ void fscrypt_msg(const struct inode *inode, const char *level, /** * fscrypt_init() - Set up for fs encryption. + * + * Return: 0 on success; -errno on failure */ static int __init fscrypt_init(void) { diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 63bfe5e8accd2814f49159d541727a77f8fbb2a4..fb43feafbb73da9e75800810e527aa8395804fe4 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -18,7 +18,7 @@ #include #include "fscrypt_private.h" -/** +/* * struct fscrypt_nokey_name - identifier for directory entry when key is absent * * When userspace lists an encrypted directory without access to the key, the @@ -106,9 +106,12 @@ static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) /** * fscrypt_fname_encrypt() - encrypt a filename - * - * The output buffer must be at least as large as the input buffer. - * Any extra space is filled with NUL padding before encryption. + * @inode: inode of the parent directory (for regular filenames) + * or of the symlink (for symlink targets) + * @iname: the filename to encrypt + * @out: (output) the encrypted filename + * @olen: size of the encrypted filename. It must be at least @iname->len. + * Any extra space is filled with NUL padding before encryption. 
* * Return: 0 on success, -errno on failure */ @@ -158,8 +161,11 @@ int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, /** * fname_decrypt() - decrypt a filename - * - * The caller must have allocated sufficient memory for the @oname string. + * @inode: inode of the parent directory (for regular filenames) + * or of the symlink (for symlink targets) + * @iname: the encrypted filename to decrypt + * @oname: (output) the decrypted filename. The caller must have allocated + * enough space for this, e.g. using fscrypt_fname_alloc_buffer(). * * Return: 0 on success, -errno on failure */ @@ -207,7 +213,10 @@ static const char lookup_table[65] = #define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3) /** - * base64_encode() - + * base64_encode() - base64-encode some bytes + * @src: the bytes to encode + * @len: number of bytes to encode + * @dst: (output) the base64-encoded string. Not NUL-terminated. * * Encodes the input string using characters from the set [A-Za-z0-9+,]. * The encoded string is roughly 4/3 times the size of the input string. @@ -273,7 +282,12 @@ bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, } /** - * fscrypt_fname_alloc_buffer - allocate a buffer for presented filenames + * fscrypt_fname_alloc_buffer() - allocate a buffer for presented filenames + * @inode: inode of the parent directory (for regular filenames) + * or of the symlink (for symlink targets) + * @max_encrypted_len: maximum length of encrypted filenames the buffer will be + * used to present + * @crypto_str: (output) buffer to allocate * * Allocate a buffer that is large enough to hold any decrypted or encoded * filename (null-terminated), for the given maximum encrypted filename length. @@ -298,9 +312,10 @@ int fscrypt_fname_alloc_buffer(const struct inode *inode, EXPORT_SYMBOL(fscrypt_fname_alloc_buffer); /** - * fscrypt_fname_free_buffer - free the buffer for presented filenames + * fscrypt_fname_free_buffer() - free a buffer for presented filenames + * @crypto_str: the buffer to free * - * Free the buffer allocated by fscrypt_fname_alloc_buffer(). + * Free a buffer that was allocated by fscrypt_fname_alloc_buffer(). */ void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) { @@ -312,10 +327,19 @@ void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) EXPORT_SYMBOL(fscrypt_fname_free_buffer); /** - * fscrypt_fname_disk_to_usr() - converts a filename from disk space to user - * space - * - * The caller must have allocated sufficient memory for the @oname string. + * fscrypt_fname_disk_to_usr() - convert an encrypted filename to + * user-presentable form + * @inode: inode of the parent directory (for regular filenames) + * or of the symlink (for symlink targets) + * @hash: first part of the name's dirhash, if applicable. This only needs to + * be provided if the filename is located in an indexed directory whose + * encryption key may be unavailable. Not needed for symlink targets. + * @minor_hash: second part of the name's dirhash, if applicable + * @iname: encrypted filename to convert. May also be "." or "..", which + * aren't actually encrypted. + * @oname: output buffer for the user-presentable filename. The caller must + * have allocated enough space for this, e.g. using + * fscrypt_fname_alloc_buffer(). * * If the key is available, we'll decrypt the disk name. Otherwise, we'll * encode it for presentation in fscrypt_nokey_name format. 
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 67bcdfa1609510b3be4f177d9850515ce2e0d9b3..50fbcf6896a706e47fb817bffcf2a5439a996f40 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -45,7 +45,7 @@ struct fscrypt_context_v2 { u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; }; -/** +/* * fscrypt_context - the encryption context of an inode * * This is the on-disk equivalent of an fscrypt_policy, stored alongside each @@ -159,7 +159,7 @@ fscrypt_policy_flags(const union fscrypt_policy *policy) BUG(); } -/** +/* * For encrypted symlinks, the ciphertext length is stored at the beginning * of the string in little-endian format. */ @@ -245,6 +245,7 @@ struct fscrypt_info { /* This inode's nonce, copied from the fscrypt_context */ u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE]; + u8 ci_raw_key[FSCRYPT_MAX_KEY_SIZE]; /* Hashed inode number. Only set for IV_INO_LBLK_32 */ u32 ci_hashed_ino; @@ -257,15 +258,14 @@ typedef enum { /* crypto.c */ extern struct kmem_cache *fscrypt_info_cachep; -extern int fscrypt_initialize(unsigned int cop_flags); -extern int fscrypt_crypt_block(const struct inode *inode, - fscrypt_direction_t rw, u64 lblk_num, - struct page *src_page, struct page *dest_page, - unsigned int len, unsigned int offs, - gfp_t gfp_flags); -extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags); - -extern void __printf(3, 4) __cold +int fscrypt_initialize(unsigned int cop_flags); +int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, + u64 lblk_num, struct page *src_page, + struct page *dest_page, unsigned int len, + unsigned int offs, gfp_t gfp_flags); +struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags); + +void __printf(3, 4) __cold fscrypt_msg(const struct inode *inode, const char *level, const char *fmt, ...); #define fscrypt_warn(inode, fmt, ...) \ @@ -291,12 +291,10 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, const struct fscrypt_info *ci); /* fname.c */ -extern int fscrypt_fname_encrypt(const struct inode *inode, - const struct qstr *iname, - u8 *out, unsigned int olen); -extern bool fscrypt_fname_encrypted_size(const struct inode *inode, - u32 orig_len, u32 max_len, - u32 *encrypted_len_ret); +int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, + u8 *out, unsigned int olen); +bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, + u32 max_len, u32 *encrypted_len_ret); /* hkdf.c */ @@ -304,8 +302,8 @@ struct fscrypt_hkdf { struct crypto_shash *hmac_tfm; }; -extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, - unsigned int master_key_size); +int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, + unsigned int master_key_size); /* * The list of contexts in which fscrypt uses HKDF. 
These values are used as @@ -322,11 +320,11 @@ extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, #define HKDF_CONTEXT_IV_INO_LBLK_32_KEY 6 #define HKDF_CONTEXT_INODE_HASH_KEY 7 -extern int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, - const u8 *info, unsigned int infolen, - u8 *okm, unsigned int okmlen); +int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, + const u8 *info, unsigned int infolen, + u8 *okm, unsigned int okmlen); -extern void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf); +void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf); /* inline_crypt.c */ #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT @@ -567,14 +565,17 @@ static inline int master_key_spec_len(const struct fscrypt_key_specifier *spec) return 0; } -extern struct key * +struct key * fscrypt_find_master_key(struct super_block *sb, const struct fscrypt_key_specifier *mk_spec); -extern int fscrypt_verify_key_added(struct super_block *sb, - const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]); +int fscrypt_add_test_dummy_key(struct super_block *sb, + struct fscrypt_key_specifier *key_spec); + +int fscrypt_verify_key_added(struct super_block *sb, + const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]); -extern int __init fscrypt_init_keyring(void); +int __init fscrypt_init_keyring(void); /* keysetup.c */ @@ -589,36 +590,34 @@ struct fscrypt_mode { extern struct fscrypt_mode fscrypt_modes[]; -extern int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, - const u8 *raw_key, unsigned int raw_key_size, - bool is_hw_wrapped, - const struct fscrypt_info *ci); +int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, unsigned int raw_key_size, + bool is_hw_wrapped, const struct fscrypt_info *ci); -extern void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key); +void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key); -extern int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, - const u8 *raw_key); +int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key); -extern int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, - const struct fscrypt_master_key *mk); +int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, + const struct fscrypt_master_key *mk); /* keysetup_v1.c */ -extern void fscrypt_put_direct_key(struct fscrypt_direct_key *dk); +void fscrypt_put_direct_key(struct fscrypt_direct_key *dk); + +int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, + const u8 *raw_master_key); -extern int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, - const u8 *raw_master_key); +int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci); -extern int fscrypt_setup_v1_file_key_via_subscribed_keyrings( - struct fscrypt_info *ci); /* policy.c */ -extern bool fscrypt_policies_equal(const union fscrypt_policy *policy1, - const union fscrypt_policy *policy2); -extern bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, - const struct inode *inode); -extern int fscrypt_policy_from_context(union fscrypt_policy *policy_u, - const union fscrypt_context *ctx_u, - int ctx_size); +bool fscrypt_policies_equal(const union fscrypt_policy *policy1, + const union fscrypt_policy *policy2); +bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, + const struct inode *inode); +int fscrypt_policy_from_context(union fscrypt_policy *policy_u, + const union fscrypt_context *ctx_u, + int ctx_size); #endif /* _FSCRYPT_PRIVATE_H */ diff --git a/fs/crypto/hooks.c 
b/fs/crypto/hooks.c index a6396bf721acfd86a98c8810b2e2bf85d4d71f8d..a3582f2ca3769da488d93a10c48a5fa915ea8c6e 100644 --- a/fs/crypto/hooks.c +++ b/fs/crypto/hooks.c @@ -9,7 +9,7 @@ #include "fscrypt_private.h" /** - * fscrypt_file_open - prepare to open a possibly-encrypted regular file + * fscrypt_file_open() - prepare to open a possibly-encrypted regular file * @inode: the inode being opened * @filp: the struct file being set up * @@ -260,7 +260,7 @@ int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, EXPORT_SYMBOL_GPL(__fscrypt_encrypt_symlink); /** - * fscrypt_get_symlink - get the target of an encrypted symlink + * fscrypt_get_symlink() - get the target of an encrypted symlink * @inode: the symlink inode * @caddr: the on-disk contents of the symlink * @max_size: size of @caddr buffer diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index 69c281a331e506cb2ebc7e5009f08948b96cb0ca..464783ff8e74a678976725d6eedfd9263471f1bb 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -43,11 +43,32 @@ static void fscrypt_get_devices(struct super_block *sb, int num_devs, sb->s_cop->get_devices(sb, devs); } +#define SDHCI "sdhci" + +static int fscrypt_find_storage_type(char **device) +{ + char boot[20] = {'\0'}; + char *match = (char *)strnstr(saved_command_line, + "androidboot.bootdevice=", + strlen(saved_command_line)); + if (match) { + memcpy(boot, (match + strlen("androidboot.bootdevice=")), + sizeof(boot) - 1); + + if (strnstr(boot, "sdhci", strlen(boot))) + *device = SDHCI; + + return 0; + } + return -EINVAL; +} + static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci) { struct super_block *sb = ci->ci_inode->i_sb; unsigned int flags = fscrypt_policy_flags(&ci->ci_policy); int ino_bits = 64, lblk_bits = 64; + char *s_type = "ufs"; if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) return offsetofend(union fscrypt_iv, nonce); @@ -58,6 +79,15 @@ static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci) if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) return sizeof(__le32); + if (fscrypt_policy_contents_mode(&ci->ci_policy) == + FSCRYPT_MODE_PRIVATE) { + fscrypt_find_storage_type(&s_type); + if (!strcmp(s_type, "sdhci")) + return sizeof(__le32); + else + return sizeof(__le64); + } + /* Default case: IVs are just the file logical block number */ if (sb->s_cop->get_ino_and_lblk_bits) sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); @@ -89,6 +119,19 @@ int fscrypt_select_encryption_impl(struct fscrypt_info *ci, !sb->s_cop->inline_crypt_enabled(sb)) return 0; + /* + * When a page contains multiple logically contiguous filesystem blocks, + * some filesystem code only calls fscrypt_mergeable_bio() for the first + * block in the page. This is fine for most of fscrypt's IV generation + * strategies, where contiguous blocks imply contiguous IVs. But it + * doesn't work with IV_INO_LBLK_32. For now, simply exclude + * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption. + */ + if ((fscrypt_policy_flags(&ci->ci_policy) & + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) && + sb->s_blocksize != PAGE_SIZE) + return 0; + /* * The needed encryption settings must be supported either by * blk-crypto-fallback, or by hardware on all the filesystem's devices. 
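The new fscrypt_get_dun_bytes() cases and the IV_INO_LBLK_32 exclusion in fscrypt_select_encryption_impl() both come down to how the DUN is formed. Under IV_INO_LBLK_32 the DUN is the 32-bit sum of the hashed inode number and the logical block number, so with sub-page blocks the DUN can wrap around in the middle of a page, and a fscrypt_mergeable_bio() check done only for a page's first block would miss the discontinuity. A small standalone illustration of that wraparound (plain userspace C, with a made-up hashed_ino value):

/*
 * Illustration only (not kernel code): with IV_INO_LBLK_32 the DUN is the
 * 32-bit value (hashed_ino + lblk).  With 1K blocks on a 4K page, the four
 * blocks sharing one page can straddle the 32-bit wraparound, so their DUNs
 * are not contiguous even though the blocks are -- which is why the hunk
 * above simply disables inline crypto for IV_INO_LBLK_32 when
 * blocksize != PAGE_SIZE.  "hashed_ino" below is a hypothetical sample value.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hashed_ino = 0xfffffffeu;	/* hypothetical ci_hashed_ino */
	uint64_t first_lblk = 0;		/* first 1K block of a 4K page */

	for (int i = 0; i < 4; i++) {
		uint32_t dun = (uint32_t)(hashed_ino + first_lblk + i);

		printf("block %d: DUN = 0x%08x\n", i, dun);
	}
	/* Prints 0xfffffffe, 0xffffffff, 0x00000000, 0x00000001: a wrap mid-page. */
	return 0;
}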
@@ -311,6 +354,10 @@ void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, fscrypt_generate_dun(ci, first_lblk, dun); bio_crypt_set_ctx(bio, &ci->ci_key.blk_key->base, dun, gfp_mask); + if ((fscrypt_policy_contents_mode(&ci->ci_policy) == + FSCRYPT_MODE_PRIVATE) && + (!strcmp(inode->i_sb->s_type->name, "ext4"))) + bio->bi_crypt_context->is_ext4 = true; } EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx); @@ -442,7 +489,6 @@ EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh); bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter) { const struct inode *inode = file_inode(iocb->ki_filp); - const struct fscrypt_info *ci = inode->i_crypt_info; const unsigned int blocksize = i_blocksize(inode); /* If the file is unencrypted, no veto from us. */ @@ -460,15 +506,6 @@ bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter) if (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), blocksize)) return false; - /* - * With IV_INO_LBLK_32 and sub-page blocks, the DUN can wrap around in - * the middle of a page. This isn't handled by the direct I/O code yet. - */ - if (blocksize != PAGE_SIZE && - (fscrypt_policy_flags(&ci->ci_policy) & - FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) - return false; - return true; } EXPORT_SYMBOL_GPL(fscrypt_dio_supported); @@ -483,8 +520,6 @@ EXPORT_SYMBOL_GPL(fscrypt_dio_supported); * targeting @pos, in order to avoid crossing a data unit number (DUN) * discontinuity. This is only needed for certain IV generation methods. * - * This assumes block_size == PAGE_SIZE; see fscrypt_dio_supported(). - * * Return: the actual number of pages that can be submitted */ int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos, int nr_pages) @@ -502,6 +537,10 @@ int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos, int nr_pages) FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) return nr_pages; + /* + * fscrypt_select_encryption_impl() ensures that block_size == PAGE_SIZE + * when using FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32. 
+ */ if (WARN_ON_ONCE(i_blocksize(inode) != PAGE_SIZE)) return 1; diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index fc9ea71b50f7f15698617586be63461c3ab0d2bf..ae081f06e149f528e59b0d53c87689f7dcbf9154 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -20,6 +20,7 @@ #include #include +#include #include #include "fscrypt_private.h" @@ -425,9 +426,9 @@ static int add_existing_master_key(struct fscrypt_master_key *mk, return 0; } -static int add_master_key(struct super_block *sb, - struct fscrypt_master_key_secret *secret, - const struct fscrypt_key_specifier *mk_spec) +static int do_add_master_key(struct super_block *sb, + struct fscrypt_master_key_secret *secret, + const struct fscrypt_key_specifier *mk_spec) { static DEFINE_MUTEX(fscrypt_add_key_mutex); struct key *key; @@ -466,6 +467,49 @@ static int add_master_key(struct super_block *sb, return err; } +/* Size of software "secret" derived from hardware-wrapped key */ +#define RAW_SECRET_SIZE 32 + +static int add_master_key(struct super_block *sb, + struct fscrypt_master_key_secret *secret, + struct fscrypt_key_specifier *key_spec) +{ + int err; + + if (key_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { + u8 _kdf_key[RAW_SECRET_SIZE]; + u8 *kdf_key = secret->raw; + unsigned int kdf_key_size = secret->size; + + if (secret->is_hw_wrapped) { + kdf_key = _kdf_key; + kdf_key_size = RAW_SECRET_SIZE; + err = fscrypt_derive_raw_secret(sb, secret->raw, + secret->size, + kdf_key, kdf_key_size); + if (err) + return err; + } + err = fscrypt_init_hkdf(&secret->hkdf, kdf_key, kdf_key_size); + /* + * Now that the HKDF context is initialized, the raw HKDF key is + * no longer needed. + */ + memzero_explicit(kdf_key, kdf_key_size); + if (err) + return err; + + /* Calculate the key identifier */ + err = fscrypt_hkdf_expand(&secret->hkdf, + HKDF_CONTEXT_KEY_IDENTIFIER, NULL, 0, + key_spec->u.identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE); + if (err) + return err; + } + return do_add_master_key(sb, secret, key_spec); +} + static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep) { const struct fscrypt_provisioning_key_payload *payload = prep->data; @@ -571,9 +615,6 @@ static int get_keyring_key(u32 key_id, u32 type, return err; } -/* Size of software "secret" derived from hardware-wrapped key */ -#define RAW_SECRET_SIZE 32 - /* * Add a master encryption key to the filesystem, causing all files which were * encrypted with it to appear "unlocked" (decrypted) when accessed. @@ -604,9 +645,6 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) struct fscrypt_add_key_arg __user *uarg = _uarg; struct fscrypt_add_key_arg arg; struct fscrypt_master_key_secret secret; - u8 _kdf_key[RAW_SECRET_SIZE]; - u8 *kdf_key; - unsigned int kdf_key_size; int err; if (copy_from_user(&arg, uarg, sizeof(arg))) @@ -618,7 +656,25 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; + /* + * Only root can add keys that are identified by an arbitrary descriptor + * rather than by a cryptographic hash --- since otherwise a malicious + * user could add the wrong key. 
+ */ + if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && + !capable(CAP_SYS_ADMIN)) + return -EACCES; + memset(&secret, 0, sizeof(secret)); + + if (arg.__flags) { + if (arg.__flags & ~__FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) + return -EINVAL; + if (arg.key_spec.type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) + return -EINVAL; + secret.is_hw_wrapped = true; + } + if (arg.key_id) { if (arg.raw_size != 0) return -EINVAL; @@ -626,14 +682,13 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) if (err) goto out_wipe_secret; err = -EINVAL; - if (!(arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) && - secret.size > FSCRYPT_MAX_KEY_SIZE) + if (secret.size > FSCRYPT_MAX_KEY_SIZE && !secret.is_hw_wrapped) goto out_wipe_secret; } else { if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE || - arg.raw_size > - ((arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) ? - FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE : FSCRYPT_MAX_KEY_SIZE)) + arg.raw_size > (secret.is_hw_wrapped ? + FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE : + FSCRYPT_MAX_KEY_SIZE)) return -EINVAL; secret.size = arg.raw_size; err = -EFAULT; @@ -641,73 +696,46 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) goto out_wipe_secret; } - switch (arg.key_spec.type) { - case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: - /* - * Only root can add keys that are identified by an arbitrary - * descriptor rather than by a cryptographic hash --- since - * otherwise a malicious user could add the wrong key. - */ - err = -EACCES; - if (!capable(CAP_SYS_ADMIN)) - goto out_wipe_secret; - - err = -EINVAL; - if (arg.__flags & ~__FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) - goto out_wipe_secret; - break; - case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: - err = -EINVAL; - if (arg.__flags & ~__FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) - goto out_wipe_secret; - if (arg.__flags & __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) { - kdf_key = _kdf_key; - kdf_key_size = RAW_SECRET_SIZE; - err = fscrypt_derive_raw_secret(sb, secret.raw, - secret.size, - kdf_key, kdf_key_size); - if (err) - goto out_wipe_secret; - secret.is_hw_wrapped = true; - } else { - kdf_key = secret.raw; - kdf_key_size = secret.size; - } - err = fscrypt_init_hkdf(&secret.hkdf, kdf_key, kdf_key_size); - /* - * Now that the HKDF context is initialized, the raw HKDF - * key is no longer needed. - */ - memzero_explicit(kdf_key, kdf_key_size); - if (err) - goto out_wipe_secret; - - /* Calculate the key identifier and return it to userspace. */ - err = fscrypt_hkdf_expand(&secret.hkdf, - HKDF_CONTEXT_KEY_IDENTIFIER, - NULL, 0, arg.key_spec.u.identifier, - FSCRYPT_KEY_IDENTIFIER_SIZE); - if (err) - goto out_wipe_secret; - err = -EFAULT; - if (copy_to_user(uarg->key_spec.u.identifier, - arg.key_spec.u.identifier, - FSCRYPT_KEY_IDENTIFIER_SIZE)) - goto out_wipe_secret; - break; - default: - WARN_ON(1); - err = -EINVAL; + err = add_master_key(sb, &secret, &arg.key_spec); + if (err) goto out_wipe_secret; - } - err = add_master_key(sb, &secret, &arg.key_spec); + /* Return the key identifier to userspace, if applicable */ + err = -EFAULT; + if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER && + copy_to_user(uarg->key_spec.u.identifier, arg.key_spec.u.identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE)) + goto out_wipe_secret; + err = 0; out_wipe_secret: wipe_master_key_secret(&secret); return err; } EXPORT_SYMBOL_GPL(fscrypt_ioctl_add_key); +/* + * Add the key for '-o test_dummy_encryption' to the filesystem keyring. + * + * Use a per-boot random key to prevent people from misusing this option. 
+ */ +int fscrypt_add_test_dummy_key(struct super_block *sb, + struct fscrypt_key_specifier *key_spec) +{ + static u8 test_key[FSCRYPT_MAX_KEY_SIZE]; + struct fscrypt_master_key_secret secret; + int err; + + get_random_once(test_key, FSCRYPT_MAX_KEY_SIZE); + + memset(&secret, 0, sizeof(secret)); + secret.size = FSCRYPT_MAX_KEY_SIZE; + memcpy(secret.raw, test_key, FSCRYPT_MAX_KEY_SIZE); + + err = add_master_key(sb, &secret, key_spec); + wipe_master_key_secret(&secret); + return err; +} + /* * Verify that the current user has added a master key with the given identifier * (returns -ENOKEY if not). This is needed to prevent a user from encrypting diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 4cac429e7adb509b2a05a79c28ce6193a808bdc1..a4e01a6fcc0079e1dc3d0ac0d871d6f3aa89ef09 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -168,7 +168,6 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, struct fscrypt_prepared_key *keys, u8 hkdf_context, bool include_fs_uuid) { - static DEFINE_MUTEX(mode_key_setup_mutex); const struct inode *inode = ci->ci_inode; const struct super_block *sb = inode->i_sb; struct fscrypt_mode *mode = ci->ci_mode; @@ -237,6 +236,7 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, } done_unlock: ci->ci_key = *prep_key; + err = 0; out_unlock: mutex_unlock(&fscrypt_mode_key_setup_mutex); @@ -477,9 +477,18 @@ static void put_crypt_info(struct fscrypt_info *ci) if (ci->ci_direct_key) fscrypt_put_direct_key(ci->ci_direct_key); - else if (ci->ci_owns_key) - fscrypt_destroy_prepared_key(&ci->ci_key); - + else if (ci->ci_owns_key) { + if (fscrypt_policy_contents_mode(&ci->ci_policy) != + FSCRYPT_MODE_PRIVATE) { + fscrypt_destroy_prepared_key(&ci->ci_key); + } else { + crypto_free_skcipher(ci->ci_key.tfm); +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + if (ci->ci_key.blk_key) + kzfree(ci->ci_key.blk_key); +#endif + } + } key = ci->ci_master_key; if (key) { struct fscrypt_master_key *mk = key->payload.data[0]; @@ -520,21 +529,18 @@ int fscrypt_get_encryption_info(struct inode *inode) res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); if (res < 0) { - if (!fscrypt_dummy_context_enabled(inode) || - IS_ENCRYPTED(inode)) { + const union fscrypt_context *dummy_ctx = + fscrypt_get_dummy_context(inode->i_sb); + + if (IS_ENCRYPTED(inode) || !dummy_ctx) { fscrypt_warn(inode, "Error %d getting encryption context", res); return res; } /* Fake up a context for an unencrypted directory */ - memset(&ctx, 0, sizeof(ctx)); - ctx.version = FSCRYPT_CONTEXT_V1; - ctx.v1.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; - ctx.v1.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; - memset(ctx.v1.master_key_descriptor, 0x42, - FSCRYPT_KEY_DESCRIPTOR_SIZE); - res = sizeof(ctx.v1); + res = fscrypt_context_size(dummy_ctx); + memcpy(&ctx, dummy_ctx, res); } crypt_info = kmem_cache_zalloc(fscrypt_info_cachep, GFP_NOFS); @@ -600,7 +606,8 @@ int fscrypt_get_encryption_info(struct inode *inode) EXPORT_SYMBOL(fscrypt_get_encryption_info); /** - * fscrypt_put_encryption_info - free most of an inode's fscrypt data + * fscrypt_put_encryption_info() - free most of an inode's fscrypt data + * @inode: an inode being evicted * * Free the inode's fscrypt_info. Filesystems must call this when the inode is * being evicted. An RCU grace period need not have elapsed yet. 
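The keysetup.c change above is the consumer side of the new dummy-context plumbing: when a directory has no context on disk, fscrypt_get_encryption_info() now copies whatever context fscrypt_get_dummy_context() returns instead of hard-coding one. The producer side is added in policy.c further below (fscrypt_set_test_dummy_encryption() and fscrypt_show_test_dummy_encryption()). A rough sketch of the expected filesystem-side wiring; the hook names and the my_sb_info type are illustrative, not taken from this series:

/* In the filesystem's sb_info: struct fscrypt_dummy_context s_dummy_enc_ctx; */

static int example_parse_test_dummy_encryption(struct super_block *sb,
						substring_t *arg,
						struct my_sb_info *sbi)
{
	/* Creates (or validates) the dummy context and adds the per-boot test key. */
	return fscrypt_set_test_dummy_encryption(sb, arg,
						 &sbi->s_dummy_enc_ctx);
}

static int example_show_options(struct seq_file *seq, struct dentry *root)
{
	struct super_block *sb = root->d_sb;

	/* Prints ",test_dummy_encryption=v1" or "=v2" if the option was set. */
	fscrypt_show_test_dummy_encryption(seq, ',', sb);
	return 0;
}

The saved dummy context also has to be released when the filesystem is torn down; how that is done is left to the filesystem glue and is not shown here.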
@@ -613,7 +620,8 @@ void fscrypt_put_encryption_info(struct inode *inode) EXPORT_SYMBOL(fscrypt_put_encryption_info); /** - * fscrypt_free_inode - free an inode's fscrypt data requiring RCU delay + * fscrypt_free_inode() - free an inode's fscrypt data requiring RCU delay + * @inode: an inode being freed * * Free the inode's cached decrypted symlink target, if any. Filesystems must * call this after an RCU grace period, just before they free the inode. @@ -628,7 +636,8 @@ void fscrypt_free_inode(struct inode *inode) EXPORT_SYMBOL(fscrypt_free_inode); /** - * fscrypt_drop_inode - check whether the inode's master key has been removed + * fscrypt_drop_inode() - check whether the inode's master key has been removed + * @inode: an inode being considered for eviction * * Filesystems supporting fscrypt must call this from their ->drop_inode() * method so that encrypted inodes are evicted as soon as they're no longer in diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index 38e54313653d24bc1a289b414c3dbf050725d916..b3df8dc4ea67b4303efc94cc784df2f766d28cef 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -279,6 +279,7 @@ static int setup_v1_file_key_derived(struct fscrypt_info *ci, if ((fscrypt_policy_contents_mode(&ci->ci_policy) == FSCRYPT_MODE_PRIVATE) && fscrypt_using_inline_encryption(ci)) { + ci->ci_owns_key = true; memcpy(key_new.bytes, raw_master_key, ci->ci_mode->keysize); for (i = 0; i < ARRAY_SIZE(key_new.words); i++) diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 804595aaea0387aa8116b05cfa9ea7821b643727..5709abdee77daa62439aa7c9e35ef7f4d158e92b 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -11,12 +11,15 @@ */ #include +#include #include #include #include "fscrypt_private.h" /** - * fscrypt_policies_equal - check whether two encryption policies are the same + * fscrypt_policies_equal() - check whether two encryption policies are the same + * @policy1: the first policy + * @policy2: the second policy * * Return: %true if equal, else %false */ @@ -194,7 +197,9 @@ static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy, } /** - * fscrypt_supported_policy - check whether an encryption policy is supported + * fscrypt_supported_policy() - check whether an encryption policy is supported + * @policy_u: the encryption policy + * @inode: the inode on which the policy will be used * * Given an encryption policy, check whether all its encryption modes and other * settings are supported by this kernel on the given inode. (But we don't @@ -216,7 +221,10 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, } /** - * fscrypt_new_context_from_policy - create a new fscrypt_context from a policy + * fscrypt_new_context_from_policy() - create a new fscrypt_context from + * an fscrypt_policy + * @ctx_u: output context + * @policy_u: input policy * * Create an fscrypt_context for an inode that is being assigned the given * encryption policy. A new nonce is randomly generated. @@ -266,7 +274,11 @@ static int fscrypt_new_context_from_policy(union fscrypt_context *ctx_u, } /** - * fscrypt_policy_from_context - convert an fscrypt_context to an fscrypt_policy + * fscrypt_policy_from_context() - convert an fscrypt_context to + * an fscrypt_policy + * @policy_u: output policy + * @ctx_u: input context + * @ctx_size: size of input context in bytes * * Given an fscrypt_context, build the corresponding fscrypt_policy. 
* @@ -632,3 +644,127 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child, return preload ? fscrypt_get_encryption_info(child): 0; } EXPORT_SYMBOL(fscrypt_inherit_context); + +/** + * fscrypt_set_test_dummy_encryption() - handle '-o test_dummy_encryption' + * @sb: the filesystem on which test_dummy_encryption is being specified + * @arg: the argument to the test_dummy_encryption option. + * If no argument was specified, then @arg->from == NULL. + * @dummy_ctx: the filesystem's current dummy context (input/output, see below) + * + * Handle the test_dummy_encryption mount option by creating a dummy encryption + * context, saving it in @dummy_ctx, and adding the corresponding dummy + * encryption key to the filesystem. If the @dummy_ctx is already set, then + * instead validate that it matches @arg. Don't support changing it via + * remount, as that is difficult to do safely. + * + * The reason we use an fscrypt_context rather than an fscrypt_policy is because + * we mustn't generate a new nonce each time we access a dummy-encrypted + * directory, as that would change the way filenames are encrypted. + * + * Return: 0 on success (dummy context set, or the same context is already set); + * -EEXIST if a different dummy context is already set; + * or another -errno value. + */ +int fscrypt_set_test_dummy_encryption(struct super_block *sb, + const substring_t *arg, + struct fscrypt_dummy_context *dummy_ctx) +{ + const char *argstr = "v2"; + const char *argstr_to_free = NULL; + struct fscrypt_key_specifier key_spec = { 0 }; + int version; + union fscrypt_context *ctx = NULL; + int err; + + if (arg->from) { + argstr = argstr_to_free = match_strdup(arg); + if (!argstr) + return -ENOMEM; + } + + if (!strcmp(argstr, "v1")) { + version = FSCRYPT_CONTEXT_V1; + key_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR; + memset(key_spec.u.descriptor, 0x42, + FSCRYPT_KEY_DESCRIPTOR_SIZE); + } else if (!strcmp(argstr, "v2")) { + version = FSCRYPT_CONTEXT_V2; + key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER; + /* key_spec.u.identifier gets filled in when adding the key */ + } else { + err = -EINVAL; + goto out; + } + + if (dummy_ctx->ctx) { + /* + * Note: if we ever make test_dummy_encryption support + * specifying other encryption settings, such as the encryption + * modes, we'll need to compare those settings here. 
+ */ + if (dummy_ctx->ctx->version == version) + err = 0; + else + err = -EEXIST; + goto out; + } + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + err = -ENOMEM; + goto out; + } + + err = fscrypt_add_test_dummy_key(sb, &key_spec); + if (err) + goto out; + + ctx->version = version; + switch (ctx->version) { + case FSCRYPT_CONTEXT_V1: + ctx->v1.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; + ctx->v1.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; + memcpy(ctx->v1.master_key_descriptor, key_spec.u.descriptor, + FSCRYPT_KEY_DESCRIPTOR_SIZE); + break; + case FSCRYPT_CONTEXT_V2: + ctx->v2.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; + ctx->v2.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; + memcpy(ctx->v2.master_key_identifier, key_spec.u.identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE); + break; + default: + WARN_ON(1); + err = -EINVAL; + goto out; + } + dummy_ctx->ctx = ctx; + ctx = NULL; + err = 0; +out: + kfree(ctx); + kfree(argstr_to_free); + return err; +} +EXPORT_SYMBOL_GPL(fscrypt_set_test_dummy_encryption); + +/** + * fscrypt_show_test_dummy_encryption() - show '-o test_dummy_encryption' + * @seq: the seq_file to print the option to + * @sep: the separator character to use + * @sb: the filesystem whose options are being shown + * + * Show the test_dummy_encryption mount option, if it was specified. + * This is mainly used for /proc/mounts. + */ +void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep, + struct super_block *sb) +{ + const union fscrypt_context *ctx = fscrypt_get_dummy_context(sb); + + if (!ctx) + return; + seq_printf(seq, "%ctest_dummy_encryption=v%d", sep, ctx->version); +} +EXPORT_SYMBOL_GPL(fscrypt_show_test_dummy_encryption); diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 748e8d59e61113b8491fd896d2439ac5853ea5d1..cb287df13a7aa72b9f32442fc900cc042aebf808 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -99,7 +99,6 @@ do { \ __LINE__, __FILE__, #x, jiffies); \ {do} \ printk("\n"); \ - BUG(); \ panic("DLM: Record message above and reboot.\n"); \ } \ } diff --git a/fs/exec.c b/fs/exec.c index 52ab168d4dcc5a37850347b974dcf1191f6d5104..e8e592d2020ffddcca52f792bbe280e34c3cac30 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1264,6 +1264,8 @@ int flush_old_exec(struct linux_binprm * bprm) */ set_mm_exe_file(bprm->mm, bprm->file); + would_dump(bprm, bprm->file); + /* * Release all of the old mmap stuff */ @@ -1797,8 +1799,6 @@ static int do_execveat_common(int fd, struct filename *filename, if (retval < 0) goto out; - would_dump(bprm, bprm->file); - retval = exec_binprm(bprm); if (retval < 0) goto out; diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index 13eb028607caf20d171314adae0a60618a52ae2f..3cbee832e7967d07bdf736ffff279926571b5fbd 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -153,6 +153,7 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino) return PTR_ERR(inode); num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; while (i < num) { + cond_resched(); map.m_lblk = i; map.m_len = num - i; n = ext4_map_blocks(NULL, inode, &map, 0); diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index ee766e3bed8b6182502f7d169e3b176ac4301389..1755a1dab5d5e5400ee6e4f10897ea6396c72c87 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -29,6 +29,8 @@ #include "ext4.h" #include "xattr.h" +#define DOTDOT_OFFSET 12 + static int ext4_dx_readdir(struct file *, struct dir_context *); /** @@ -51,6 +53,19 @@ static int is_dx_dir(struct inode 
*inode) return 0; } +static bool is_fake_entry(struct inode *dir, ext4_lblk_t lblk, + unsigned int offset, unsigned int blocksize) +{ + /* Entries in the first block before this value refer to . or .. */ + if (lblk == 0 && offset <= DOTDOT_OFFSET) + return true; + /* Check if this is likely the csum entry */ + if (ext4_has_metadata_csum(dir->i_sb) && offset % blocksize == + blocksize - sizeof(struct ext4_dir_entry_tail)) + return true; + return false; +} + /* * Return 0 if the directory entry is OK, and 1 if there is a problem * @@ -63,25 +78,30 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, struct inode *dir, struct file *filp, struct ext4_dir_entry_2 *de, struct buffer_head *bh, char *buf, int size, + ext4_lblk_t lblk, unsigned int offset) { const char *error_msg = NULL; const int rlen = ext4_rec_len_from_disk(de->rec_len, dir->i_sb->s_blocksize); + const int next_offset = ((char *) de - buf) + rlen; + unsigned int blocksize = dir->i_sb->s_blocksize; + bool fake = is_fake_entry(dir, lblk, offset, blocksize); + bool next_fake = is_fake_entry(dir, lblk, next_offset, blocksize); - if (unlikely(rlen < EXT4_DIR_REC_LEN(1))) + if (unlikely(rlen < ext4_dir_rec_len(1, fake ? NULL : dir))) error_msg = "rec_len is smaller than minimal"; else if (unlikely(rlen % 4 != 0)) error_msg = "rec_len % 4 != 0"; - else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len))) + else if (unlikely(rlen < ext4_dir_rec_len(de->name_len, + fake ? NULL : dir))) error_msg = "rec_len is too small for name_len"; else if (unlikely(((char *) de - buf) + rlen > size)) error_msg = "directory entry overrun"; - else if (unlikely(((char *) de - buf) + rlen > - size - EXT4_DIR_REC_LEN(1) && - ((char *) de - buf) + rlen != size)) { + else if (unlikely(next_offset > size - ext4_dir_rec_len(1, + next_fake ? NULL : dir) && + next_offset != size)) error_msg = "directory entry too close to block end"; - } else if (unlikely(le32_to_cpu(de->inode) > le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))) error_msg = "inode out of bounds"; @@ -91,15 +111,15 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, if (filp) ext4_error_file(filp, function, line, bh->b_blocknr, "bad entry in directory: %s - offset=%u, " - "inode=%u, rec_len=%d, name_len=%d, size=%d", + "inode=%u, rec_len=%d, lblk=%d, size=%d fake=%d", error_msg, offset, le32_to_cpu(de->inode), - rlen, de->name_len, size); + rlen, lblk, size, fake); else ext4_error_inode(dir, function, line, bh->b_blocknr, "bad entry in directory: %s - offset=%u, " - "inode=%u, rec_len=%d, name_len=%d, size=%d", + "inode=%u, rec_len=%d, lblk=%d, size=%d fake=%d", error_msg, offset, le32_to_cpu(de->inode), - rlen, de->name_len, size); + rlen, lblk, size, fake); return 1; } @@ -224,7 +244,8 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) * failure will be detected in the * dirent test below. 
*/ if (ext4_rec_len_from_disk(de->rec_len, - sb->s_blocksize) < EXT4_DIR_REC_LEN(1)) + sb->s_blocksize) < ext4_dir_rec_len(1, + inode)) break; i += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); @@ -240,7 +261,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) de = (struct ext4_dir_entry_2 *) (bh->b_data + offset); if (ext4_check_dir_entry(inode, file, de, bh, bh->b_data, bh->b_size, - offset)) { + map.m_lblk, offset)) { /* * On error, skip to the next block */ @@ -265,7 +286,9 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) /* Directory is encrypted */ err = fscrypt_fname_disk_to_usr(inode, - 0, 0, &de_name, &fstr); + EXT4_DIRENT_HASH(de), + EXT4_DIRENT_MINOR_HASH(de), + &de_name, &fstr); de_name = fstr; fstr.len = save_len; if (err) @@ -640,7 +663,7 @@ int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf, top = buf + buf_size; while ((char *) de < top) { if (ext4_check_dir_entry(dir, NULL, de, bh, - buf, buf_size, offset)) + buf, buf_size, 0, offset)) return -EFSCORRUPTED; rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); de = (struct ext4_dir_entry_2 *)((char *)de + rlen); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index a2d6e8f0eb971ddb357e6e549b7fb4352cc3cd24..1e71fb42a841d583383e178d3f6304407499789c 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1362,11 +1362,9 @@ struct ext4_super_block { */ #define EXT4_MF_MNTDIR_SAMPLED 0x0001 #define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */ -#define EXT4_MF_TEST_DUMMY_ENCRYPTION 0x0004 #ifdef CONFIG_FS_ENCRYPTION -#define DUMMY_ENCRYPTION_ENABLED(sbi) (unlikely((sbi)->s_mount_flags & \ - EXT4_MF_TEST_DUMMY_ENCRYPTION)) +#define DUMMY_ENCRYPTION_ENABLED(sbi) ((sbi)->s_dummy_enc_ctx.ctx != NULL) #else #define DUMMY_ENCRYPTION_ENABLED(sbi) (0) #endif @@ -1546,6 +1544,9 @@ struct ext4_sb_info { struct ratelimit_state s_warning_ratelimit_state; struct ratelimit_state s_msg_ratelimit_state; + /* Encryption context for '-o test_dummy_encryption' */ + struct fscrypt_dummy_context s_dummy_enc_ctx; + /* * Barrier between writepages ops and changing any inode's JOURNAL_DATA * or EXTENTS flag. @@ -1966,6 +1967,17 @@ struct ext4_dir_entry { char name[EXT4_NAME_LEN]; /* File name */ }; + +/* + * Encrypted Casefolded entries require saving the hash on disk. This structure + * followed ext4_dir_entry_2's name[name_len] at the next 4 byte aligned + * boundary. + */ +struct ext4_dir_entry_hash { + __le32 hash; + __le32 minor_hash; +}; + /* * The new version of the directory entry. Since EXT4 structures are * stored in intel byte order, and the name_len field could never be @@ -1980,6 +1992,22 @@ struct ext4_dir_entry_2 { char name[EXT4_NAME_LEN]; /* File name */ }; +/* + * Access the hashes at the end of ext4_dir_entry_2 + */ +#define EXT4_DIRENT_HASHES(entry) \ + ((struct ext4_dir_entry_hash *) \ + (((void *)(entry)) + \ + ((8 + (entry)->name_len + EXT4_DIR_ROUND) & ~EXT4_DIR_ROUND))) +#define EXT4_DIRENT_HASH(entry) le32_to_cpu(EXT4_DIRENT_HASHES(de)->hash) +#define EXT4_DIRENT_MINOR_HASH(entry) \ + le32_to_cpu(EXT4_DIRENT_HASHES(de)->minor_hash) + +static inline bool ext4_hash_in_dirent(const struct inode *inode) +{ + return IS_CASEFOLDED(inode) && IS_ENCRYPTED(inode); +} + /* * This is a bogus directory entry at the end of each leaf block that * records checksums. 
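The EXT4_DIRENT_HASHES() macro above and the ext4_dir_rec_len() helper added just below define the new on-disk layout for casefolded+encrypted directories: the hash pair is appended after name[] at the next 4-byte boundary. A quick worked example of the arithmetic for a 5-byte name, as throwaway userspace C that mirrors (rather than reuses) the kernel macros: the hashes land at offset 16 and rec_len grows from 16 to 24 bytes.

#include <stdio.h>

#define EXT4_DIR_PAD	4
#define EXT4_DIR_ROUND	(EXT4_DIR_PAD - 1)

static unsigned int dir_rec_len(unsigned char name_len, int hash_in_dirent)
{
	unsigned int rec_len = name_len + 8 + EXT4_DIR_ROUND;

	if (hash_in_dirent)
		rec_len += 8;	/* sizeof(struct ext4_dir_entry_hash) */
	return rec_len & ~EXT4_DIR_ROUND;
}

int main(void)
{
	unsigned char name_len = 5;
	/* Hash offset: header + name[] rounded up to the next 4-byte boundary. */
	unsigned int hash_off = (8 + name_len + EXT4_DIR_ROUND) & ~EXT4_DIR_ROUND;

	printf("rec_len without hash: %u\n", dir_rec_len(name_len, 0)); /* 16 */
	printf("rec_len with hash:    %u\n", dir_rec_len(name_len, 1)); /* 24 */
	printf("hash/minor_hash at offset %u\n", hash_off);             /* 16 */
	return 0;
}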
@@ -2021,10 +2049,24 @@ struct ext4_dir_entry_tail { */ #define EXT4_DIR_PAD 4 #define EXT4_DIR_ROUND (EXT4_DIR_PAD - 1) -#define EXT4_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT4_DIR_ROUND) & \ - ~EXT4_DIR_ROUND) #define EXT4_MAX_REC_LEN ((1<<16)-1) +/* + * The rec_len is dependent on the type of directory. Directories that are + * casefolded and encrypted need to store the hash as well, so we add room for + * ext4_extended_dir_entry_2. For all entries related to '.' or '..' you should + * pass NULL for dir, as those entries do not use the extra fields. + */ +static inline unsigned int ext4_dir_rec_len(__u8 name_len, + const struct inode *dir) +{ + int rec_len = (name_len + 8 + EXT4_DIR_ROUND); + + if (dir && ext4_hash_in_dirent(dir)) + rec_len += sizeof(struct ext4_dir_entry_hash); + return (rec_len & ~EXT4_DIR_ROUND); +} + /* * If we ever get support for fs block sizes > page_size, we'll need * to remove the #if statements in the next two functions... @@ -2081,6 +2123,7 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize) #define DX_HASH_LEGACY_UNSIGNED 3 #define DX_HASH_HALF_MD4_UNSIGNED 4 #define DX_HASH_TEA_UNSIGNED 5 +#define DX_HASH_SIPHASH 6 static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc, const void *address, unsigned int length) @@ -2138,6 +2181,7 @@ struct ext4_filename { }; #define fname_name(p) ((p)->disk_name.name) +#define fname_usr_name(p) ((p)->usr_fname->name) #define fname_len(p) ((p)->disk_name.len) /* @@ -2361,9 +2405,9 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb, ext4_fsblk_t ext4_inode_to_goal_block(struct inode *); #ifdef CONFIG_UNICODE -extern void ext4_fname_setup_ci_filename(struct inode *dir, +extern int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname, - struct fscrypt_str *fname); + struct ext4_filename *fname); #endif #ifdef CONFIG_FS_ENCRYPTION @@ -2394,9 +2438,9 @@ static inline int ext4_fname_setup_filename(struct inode *dir, ext4_fname_from_fscrypt_name(fname, &name); #ifdef CONFIG_UNICODE - ext4_fname_setup_ci_filename(dir, iname, &fname->cf_name); + err = ext4_fname_setup_ci_filename(dir, iname, fname); #endif - return 0; + return err; } static inline int ext4_fname_prepare_lookup(struct inode *dir, @@ -2413,9 +2457,9 @@ static inline int ext4_fname_prepare_lookup(struct inode *dir, ext4_fname_from_fscrypt_name(fname, &name); #ifdef CONFIG_UNICODE - ext4_fname_setup_ci_filename(dir, &dentry->d_name, &fname->cf_name); + err = ext4_fname_setup_ci_filename(dir, &dentry->d_name, fname); #endif - return 0; + return err; } static inline void ext4_fname_free_filename(struct ext4_filename *fname) @@ -2440,15 +2484,16 @@ static inline int ext4_fname_setup_filename(struct inode *dir, int lookup, struct ext4_filename *fname) { + int err = 0; fname->usr_fname = iname; fname->disk_name.name = (unsigned char *) iname->name; fname->disk_name.len = iname->len; #ifdef CONFIG_UNICODE - ext4_fname_setup_ci_filename(dir, iname, &fname->cf_name); + err = ext4_fname_setup_ci_filename(dir, iname, fname); #endif - return 0; + return err; } static inline int ext4_fname_prepare_lookup(struct inode *dir, @@ -2472,21 +2517,22 @@ extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *, struct file *, struct ext4_dir_entry_2 *, struct buffer_head *, char *, int, - unsigned int); -#define ext4_check_dir_entry(dir, filp, de, bh, buf, size, offset) \ + ext4_lblk_t, unsigned int); +#define ext4_check_dir_entry(dir, filp, de, bh, buf, size, lblk, offset) \ 
unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \ - (de), (bh), (buf), (size), (offset))) + (de), (bh), (buf), (size), (lblk), (offset))) extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, __u32 minor_hash, struct ext4_dir_entry_2 *dirent, struct fscrypt_str *ent_name); extern void ext4_htree_free_dir_info(struct dir_private_info *p); extern int ext4_find_dest_de(struct inode *dir, struct inode *inode, + ext4_lblk_t lblk, struct buffer_head *bh, void *buf, int buf_size, struct ext4_filename *fname, struct ext4_dir_entry_2 **dest_de); -void ext4_insert_dentry(struct inode *inode, +void ext4_insert_dentry(struct inode *dir, struct inode *inode, struct ext4_dir_entry_2 *de, int buf_size, struct ext4_filename *fname); @@ -2671,11 +2717,12 @@ extern int ext4_search_dir(struct buffer_head *bh, int buf_size, struct inode *dir, struct ext4_filename *fname, - unsigned int offset, + ext4_lblk_t lblk, unsigned int offset, struct ext4_dir_entry_2 **res_dir); extern int ext4_generic_delete_entry(handle_t *handle, struct inode *dir, struct ext4_dir_entry_2 *de_del, + ext4_lblk_t lblk, struct buffer_head *bh, void *entry_buf, int buf_size, @@ -3201,9 +3248,6 @@ extern void initialize_dirent_tail(struct ext4_dir_entry_tail *t, extern int ext4_handle_dirty_dirent_node(handle_t *handle, struct inode *inode, struct buffer_head *bh); -extern int ext4_ci_compare(const struct inode *parent, - const struct qstr *fname, - const struct qstr *entry, bool quick); #define S_SHIFT 12 static const unsigned char ext4_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = { diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h index a284fb28944b5d9f2699378d14cdad4a1c477932..63291c265aa0d6c6ca3ddf5c264e4cf2a4706269 100644 --- a/fs/ext4/ext4_extents.h +++ b/fs/ext4/ext4_extents.h @@ -169,10 +169,13 @@ struct ext4_ext_path { (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) #define EXT_LAST_INDEX(__hdr__) \ (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) -#define EXT_MAX_EXTENT(__hdr__) \ - (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) +#define EXT_MAX_EXTENT(__hdr__) \ + ((le16_to_cpu((__hdr__)->eh_max)) ? \ + ((EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \ + : 0) #define EXT_MAX_INDEX(__hdr__) \ - (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) + ((le16_to_cpu((__hdr__)->eh_max)) ? \ + ((EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) : 0) static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode) { diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 2824b5c6518744783abb9181de31dc3633161b95..29b041040ea2d50668eef7a2df8e8fe52732a5e3 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -2917,7 +2917,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, * in use to avoid freeing it when removing blocks. 
*/ if (sbi->s_cluster_ratio > 1) { - pblk = ext4_ext_pblock(ex) + end - ee_block + 2; + pblk = ext4_ext_pblock(ex) + end - ee_block + 1; partial_cluster = -(long long) EXT4_B2C(sbi, pblk); } diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 5508baa11bb669545724b998af92d9a0375af054..8a28d47bd502aa7f47a95cb2dfb3e5f9137083a5 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -44,30 +44,28 @@ */ static int ext4_sync_parent(struct inode *inode) { - struct dentry *dentry = NULL; - struct inode *next; + struct dentry *dentry, *next; int ret = 0; if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) return 0; - inode = igrab(inode); + dentry = d_find_any_alias(inode); + if (!dentry) + return 0; while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) { ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY); - dentry = d_find_any_alias(inode); - if (!dentry) - break; - next = igrab(d_inode(dentry->d_parent)); + + next = dget_parent(dentry); dput(dentry); - if (!next) - break; - iput(inode); - inode = next; + dentry = next; + inode = dentry->d_inode; + /* * The directory inode may have gone through rmdir by now. But * the inode itself and its blocks are still allocated (we hold - * a reference to the inode so it didn't go through - * ext4_evict_inode()) and so we are safe to flush metadata - * blocks and the inode. + * a reference to the inode via its dentry), so it didn't go + * through ext4_evict_inode()) and so we are safe to flush + * metadata blocks and the inode. */ ret = sync_mapping_buffers(inode->i_mapping); if (ret) @@ -76,7 +74,7 @@ static int ext4_sync_parent(struct inode *inode) if (ret) break; } - iput(inode); + dput(dentry); return ret; } diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c index f3bc69b8d4e5351c5caad61d6f3ece89a8899258..ea735403fbb1efe895d612911864dec5a39ee363 100644 --- a/fs/ext4/hash.c +++ b/fs/ext4/hash.c @@ -201,7 +201,7 @@ static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num) * represented, and whether or not the returned hash is 32 bits or 64 * bits. 32 bit hashes will return 0 for the minor hash. 
*/ -static int __ext4fs_dirhash(const char *name, int len, +static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len, struct dx_hash_info *hinfo) { __u32 hash; @@ -261,6 +261,22 @@ static int __ext4fs_dirhash(const char *name, int len, hash = buf[0]; minor_hash = buf[1]; break; + case DX_HASH_SIPHASH: + { + struct qstr qname = QSTR_INIT(name, len); + __u64 combined_hash; + + if (fscrypt_has_encryption_key(dir)) { + combined_hash = fscrypt_fname_siphash(dir, &qname); + } else { + ext4_warning_inode(dir, "Siphash requires key"); + return -1; + } + + hash = (__u32)(combined_hash >> 32); + minor_hash = (__u32)combined_hash; + break; + } default: hinfo->hash = 0; return -1; @@ -282,7 +298,7 @@ int ext4fs_dirhash(const struct inode *dir, const char *name, int len, unsigned char *buff; struct qstr qstr = {.name = name, .len = len }; - if (len && IS_CASEFOLDED(dir) && um) { + if (len && needs_casefold(dir) && um) { buff = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL); if (!buff) return -ENOMEM; @@ -293,12 +309,12 @@ int ext4fs_dirhash(const struct inode *dir, const char *name, int len, goto opaque_seq; } - r = __ext4fs_dirhash(buff, dlen, hinfo); + r = __ext4fs_dirhash(dir, buff, dlen, hinfo); kfree(buff); return r; } opaque_seq: #endif - return __ext4fs_dirhash(name, len, hinfo); + return __ext4fs_dirhash(dir, name, len, hinfo); } diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index b9d4cb194462e50b41e683ac0db74232ba2f8092..2f76551aad093f5ae81e9777e7e3c85323c3855e 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -464,7 +464,10 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent, int ret = -1; if (qstr) { - hinfo.hash_version = DX_HASH_HALF_MD4; + if (ext4_hash_in_dirent(parent)) + hinfo.hash_version = DX_HASH_SIPHASH; + else + hinfo.hash_version = DX_HASH_HALF_MD4; hinfo.seed = sbi->s_hash_seed; ext4fs_dirhash(parent, qstr->name, qstr->len, &hinfo); grp = hinfo.hash; diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index f737d5b1ca3b4e691442f614c92df61dbb572cba..a71cda05b630e5d819fa2d13de120ca0ca0d1b36 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -1016,7 +1016,7 @@ void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh, offset, de_len, de->name_len, de->name, de->name_len, le32_to_cpu(de->inode)); if (ext4_check_dir_entry(dir, NULL, de, bh, - inline_start, inline_size, offset)) + inline_start, inline_size, 0, offset)) BUG(); offset += de_len; @@ -1042,7 +1042,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle, int err; struct ext4_dir_entry_2 *de; - err = ext4_find_dest_de(dir, inode, iloc->bh, inline_start, + err = ext4_find_dest_de(dir, inode, 0, iloc->bh, inline_start, inline_size, fname, &de); if (err) return err; @@ -1051,7 +1051,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle, err = ext4_journal_get_write_access(handle, iloc->bh); if (err) return err; - ext4_insert_dentry(inode, de, inline_size, fname); + ext4_insert_dentry(dir, inode, de, inline_size, fname); ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size); @@ -1120,7 +1120,7 @@ static int ext4_update_inline_dir(handle_t *handle, struct inode *dir, int old_size = EXT4_I(dir)->i_inline_size - EXT4_MIN_INLINE_DATA_SIZE; int new_size = get_max_inline_xattr_value_size(dir, iloc); - if (new_size - old_size <= EXT4_DIR_REC_LEN(1)) + if (new_size - old_size <= ext4_dir_rec_len(1, NULL)) return -ENOSPC; ret = ext4_update_inline_data(handle, dir, @@ -1401,8 +1401,8 @@ int htree_inlinedir_to_tree(struct file *dir_file, fake.name_len = 1; 
strcpy(fake.name, "."); fake.rec_len = ext4_rec_len_to_disk( - EXT4_DIR_REC_LEN(fake.name_len), - inline_size); + ext4_dir_rec_len(fake.name_len, NULL), + inline_size); ext4_set_de_type(inode->i_sb, &fake, S_IFDIR); de = &fake; pos = EXT4_INLINE_DOTDOT_OFFSET; @@ -1411,8 +1411,8 @@ int htree_inlinedir_to_tree(struct file *dir_file, fake.name_len = 2; strcpy(fake.name, ".."); fake.rec_len = ext4_rec_len_to_disk( - EXT4_DIR_REC_LEN(fake.name_len), - inline_size); + ext4_dir_rec_len(fake.name_len, NULL), + inline_size); ext4_set_de_type(inode->i_sb, &fake, S_IFDIR); de = &fake; pos = EXT4_INLINE_DOTDOT_SIZE; @@ -1421,13 +1421,18 @@ int htree_inlinedir_to_tree(struct file *dir_file, pos += ext4_rec_len_from_disk(de->rec_len, inline_size); if (ext4_check_dir_entry(inode, dir_file, de, iloc.bh, dir_buf, - inline_size, pos)) { + inline_size, block, pos)) { ret = count; goto out; } } - ext4fs_dirhash(dir, de->name, de->name_len, hinfo); + if (ext4_hash_in_dirent(dir)) { + hinfo->hash = EXT4_DIRENT_HASH(de); + hinfo->minor_hash = EXT4_DIRENT_MINOR_HASH(de); + } else { + ext4fs_dirhash(dir, de->name, de->name_len, hinfo); + } if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && (hinfo->minor_hash < start_minor_hash))) @@ -1509,8 +1514,8 @@ int ext4_read_inline_dir(struct file *file, * So we will use extra_offset and extra_size to indicate them * during the inline dir iteration. */ - dotdot_offset = EXT4_DIR_REC_LEN(1); - dotdot_size = dotdot_offset + EXT4_DIR_REC_LEN(2); + dotdot_offset = ext4_dir_rec_len(1, NULL); + dotdot_size = dotdot_offset + ext4_dir_rec_len(2, NULL); extra_offset = dotdot_size - EXT4_INLINE_DOTDOT_SIZE; extra_size = extra_offset + inline_size; @@ -1545,7 +1550,7 @@ int ext4_read_inline_dir(struct file *file, * failure will be detected in the * dirent test below. 
*/ if (ext4_rec_len_from_disk(de->rec_len, extra_size) - < EXT4_DIR_REC_LEN(1)) + < ext4_dir_rec_len(1, NULL)) break; i += ext4_rec_len_from_disk(de->rec_len, extra_size); @@ -1573,7 +1578,7 @@ int ext4_read_inline_dir(struct file *file, de = (struct ext4_dir_entry_2 *) (dir_buf + ctx->pos - extra_offset); if (ext4_check_dir_entry(inode, file, de, iloc.bh, dir_buf, - extra_size, ctx->pos)) + extra_size, 0, ctx->pos)) goto out; if (le32_to_cpu(de->inode)) { if (!dir_emit(ctx, de->name, de->name_len, @@ -1665,7 +1670,7 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir, EXT4_INLINE_DOTDOT_SIZE; inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE; ret = ext4_search_dir(iloc.bh, inline_start, inline_size, - dir, fname, 0, res_dir); + dir, fname, 0, 0, res_dir); if (ret == 1) goto out_find; if (ret < 0) @@ -1678,7 +1683,7 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir, inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE; ret = ext4_search_dir(iloc.bh, inline_start, inline_size, - dir, fname, 0, res_dir); + dir, fname, 0, 0, res_dir); if (ret == 1) goto out_find; @@ -1727,7 +1732,7 @@ int ext4_delete_inline_entry(handle_t *handle, if (err) goto out; - err = ext4_generic_delete_entry(handle, dir, de_del, bh, + err = ext4_generic_delete_entry(handle, dir, de_del, 0, bh, inline_start, inline_size, 0); if (err) goto out; @@ -1811,7 +1816,7 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) &inline_pos, &inline_size); if (ext4_check_dir_entry(dir, NULL, de, iloc.bh, inline_pos, - inline_size, offset)) { + inline_size, 0, offset)) { ext4_warning(dir->i_sb, "bad inline directory (dir #%lu) - " "inode %u, rec_len %u, name_len %d" diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index b134add5a5add673b10b387e69fbed086866521b..650f0ef14fcc935542a05c2a2533d65319aaa26e 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -276,9 +276,11 @@ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de, unsigned blocksize, struct dx_hash_info *hinfo, struct dx_map_entry map[]); static void dx_sort_map(struct dx_map_entry *map, unsigned count); -static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to, - struct dx_map_entry *offsets, int count, unsigned blocksize); -static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize); +static struct ext4_dir_entry_2 *dx_move_dirents(struct inode *dir, char *from, + char *to, struct dx_map_entry *offsets, + int count, unsigned int blocksize); +static struct ext4_dir_entry_2 *dx_pack_dirents(struct inode *dir, char *base, + unsigned int blocksize); static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block); static int ext4_htree_next_block(struct inode *dir, __u32 hash, @@ -287,7 +289,7 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash, __u32 *start_hash); static struct buffer_head * ext4_dx_find_entry(struct inode *dir, struct ext4_filename *fname, - struct ext4_dir_entry_2 **res_dir); + struct ext4_dir_entry_2 **res_dir, ext4_lblk_t *lblk); static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname, struct inode *dir, struct inode *inode); @@ -569,8 +571,9 @@ static inline void dx_set_limit(struct dx_entry *entries, unsigned value) static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize) { - unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) - - EXT4_DIR_REC_LEN(2) - infosize; + unsigned int entry_space = dir->i_sb->s_blocksize - + ext4_dir_rec_len(1, NULL) - + 
ext4_dir_rec_len(2, NULL) - infosize; if (ext4_has_metadata_csum(dir->i_sb)) entry_space -= sizeof(struct dx_tail); @@ -579,7 +582,8 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize) static inline unsigned dx_node_limit(struct inode *dir) { - unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0); + unsigned int entry_space = dir->i_sb->s_blocksize - + ext4_dir_rec_len(0, dir); if (ext4_has_metadata_csum(dir->i_sb)) entry_space -= sizeof(struct dx_tail); @@ -675,7 +679,10 @@ static struct stats dx_show_leaf(struct inode *dir, name = fname_crypto_str.name; len = fname_crypto_str.len; } - ext4fs_dirhash(dir, de->name, + if (IS_CASEFOLDED(dir)) + h.hash = EXT4_DIRENT_HASH(de); + else + ext4fs_dirhash(dir, de->name, de->name_len, &h); printk("%*.s:(E)%x.%u ", len, name, h.hash, (unsigned) ((char *) de @@ -691,7 +698,7 @@ static struct stats dx_show_leaf(struct inode *dir, (unsigned) ((char *) de - base)); #endif } - space += EXT4_DIR_REC_LEN(de->name_len); + space += ext4_dir_rec_len(de->name_len, dir); names++; } de = ext4_next_entry(de, size); @@ -763,18 +770,34 @@ dx_probe(struct ext4_filename *fname, struct inode *dir, root = (struct dx_root *) frame->bh->b_data; if (root->info.hash_version != DX_HASH_TEA && root->info.hash_version != DX_HASH_HALF_MD4 && - root->info.hash_version != DX_HASH_LEGACY) { + root->info.hash_version != DX_HASH_LEGACY && + root->info.hash_version != DX_HASH_SIPHASH) { ext4_warning_inode(dir, "Unrecognised inode hash code %u", root->info.hash_version); goto fail; } + if (ext4_hash_in_dirent(dir)) { + if (root->info.hash_version != DX_HASH_SIPHASH) { + ext4_warning_inode(dir, + "Hash in dirent, but hash is not SIPHASH"); + goto fail; + } + } else { + if (root->info.hash_version == DX_HASH_SIPHASH) { + ext4_warning_inode(dir, + "Hash code is SIPHASH, but hash not in dirent"); + goto fail; + } + } if (fname) hinfo = &fname->hinfo; hinfo->hash_version = root->info.hash_version; if (hinfo->hash_version <= DX_HASH_TEA) hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed; - if (fname && fname_name(fname)) + /* hash is already computed for encrypted casefolded directory */ + if (fname && fname_name(fname) && + !(IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir))) ext4fs_dirhash(dir, fname_name(fname), fname_len(fname), hinfo); hash = hinfo->hash; @@ -989,6 +1012,7 @@ static int htree_dirblock_to_tree(struct file *dir_file, struct ext4_dir_entry_2 *de, *top; int err = 0, count = 0; struct fscrypt_str fname_crypto_str = FSTR_INIT(NULL, 0), tmp_str; + int csum = ext4_has_metadata_csum(dir->i_sb); dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n", (unsigned long)block)); @@ -997,9 +1021,11 @@ static int htree_dirblock_to_tree(struct file *dir_file, return PTR_ERR(bh); de = (struct ext4_dir_entry_2 *) bh->b_data; + /* csum entries are not larger in the casefolded encrypted case */ top = (struct ext4_dir_entry_2 *) ((char *) de + dir->i_sb->s_blocksize - - EXT4_DIR_REC_LEN(0)); + ext4_dir_rec_len(0, + csum ? 
NULL : dir)); #ifdef CONFIG_FS_ENCRYPTION /* Check if the directory is encrypted */ if (IS_ENCRYPTED(dir)) { @@ -1018,13 +1044,23 @@ static int htree_dirblock_to_tree(struct file *dir_file, #endif for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) { if (ext4_check_dir_entry(dir, NULL, de, bh, - bh->b_data, bh->b_size, + bh->b_data, bh->b_size, block, (block<i_sb)) + ((char *)de - bh->b_data))) { /* silently ignore the rest of the block */ break; } - ext4fs_dirhash(dir, de->name, de->name_len, hinfo); + if (ext4_hash_in_dirent(dir)) { + if (de->name_len && de->inode) { + hinfo->hash = EXT4_DIRENT_HASH(de); + hinfo->minor_hash = EXT4_DIRENT_MINOR_HASH(de); + } else { + hinfo->hash = 0; + hinfo->minor_hash = 0; + } + } else { + ext4fs_dirhash(dir, de->name, de->name_len, hinfo); + } if ((hinfo->hash < start_hash) || ((hinfo->hash == start_hash) && (hinfo->minor_hash < start_minor_hash))) @@ -1095,7 +1131,11 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, start_hash, start_minor_hash)); dir = file_inode(dir_file); if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) { - hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; + if (ext4_hash_in_dirent(dir)) + hinfo.hash_version = DX_HASH_SIPHASH; + else + hinfo.hash_version = + EXT4_SB(dir->i_sb)->s_def_hash_version; if (hinfo.hash_version <= DX_HASH_TEA) hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; @@ -1188,11 +1228,12 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, static inline int search_dirblock(struct buffer_head *bh, struct inode *dir, struct ext4_filename *fname, + ext4_lblk_t lblk, unsigned int offset, struct ext4_dir_entry_2 **res_dir) { return ext4_search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir, - fname, offset, res_dir); + fname, lblk, offset, res_dir); } /* @@ -1213,7 +1254,10 @@ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de, while ((char *) de < base + blocksize) { if (de->name_len && de->inode) { - ext4fs_dirhash(dir, de->name, de->name_len, &h); + if (ext4_hash_in_dirent(dir)) + h.hash = EXT4_DIRENT_HASH(de); + else + ext4fs_dirhash(dir, de->name, de->name_len, &h); map_tail--; map_tail->hash = h.hash; map_tail->offs = ((char *) de - base)>>2; @@ -1277,47 +1321,65 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block) * Returns: 0 if the directory entry matches, more than 0 if it * doesn't match or less than zero on error. 
*/ -int ext4_ci_compare(const struct inode *parent, const struct qstr *name, - const struct qstr *entry, bool quick) +static int ext4_ci_compare(const struct inode *parent, const struct qstr *name, + u8 *de_name, size_t de_name_len, bool quick) { const struct super_block *sb = parent->i_sb; const struct unicode_map *um = sb->s_encoding; + struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len); + struct qstr entry = QSTR_INIT(de_name, de_name_len); int ret; + if (IS_ENCRYPTED(parent)) { + const struct fscrypt_str encrypted_name = + FSTR_INIT(de_name, de_name_len); + + decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL); + if (!decrypted_name.name) + return -ENOMEM; + ret = fscrypt_fname_disk_to_usr(parent, 0, 0, &encrypted_name, + &decrypted_name); + if (ret < 0) + goto out; + entry.name = decrypted_name.name; + entry.len = decrypted_name.len; + } + if (quick) - ret = utf8_strncasecmp_folded(um, name, entry); + ret = utf8_strncasecmp_folded(um, name, &entry); else - ret = utf8_strncasecmp(um, name, entry); - + ret = utf8_strncasecmp(um, name, &entry); if (ret < 0) { /* Handle invalid character sequence as either an error * or as an opaque byte sequence. */ if (sb_has_enc_strict_mode(sb)) - return -EINVAL; - - if (name->len != entry->len) - return 1; - - return !!memcmp(name->name, entry->name, name->len); + ret = -EINVAL; + else if (name->len != entry.len) + ret = 1; + else + ret = !!memcmp(name->name, entry.name, entry.len); } - +out: + kfree(decrypted_name.name); return ret; } -void ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname, - struct fscrypt_str *cf_name) +int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname, + struct ext4_filename *name) { + struct fscrypt_str *cf_name = &name->cf_name; + struct dx_hash_info *hinfo = &name->hinfo; int len; if (!needs_casefold(dir)) { cf_name->name = NULL; - return; + return 0; } cf_name->name = kmalloc(EXT4_NAME_LEN, GFP_NOFS); if (!cf_name->name) - return; + return -ENOMEM; len = utf8_casefold(dir->i_sb->s_encoding, iname, cf_name->name, @@ -1325,10 +1387,18 @@ void ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname, if (len <= 0) { kfree(cf_name->name); cf_name->name = NULL; - return; } cf_name->len = (unsigned) len; + if (!IS_ENCRYPTED(dir)) + return 0; + hinfo->hash_version = DX_HASH_SIPHASH; + hinfo->seed = NULL; + if (cf_name->name) + ext4fs_dirhash(dir, cf_name->name, cf_name->len, hinfo); + else + ext4fs_dirhash(dir, iname->name, iname->len, hinfo); + return 0; } #endif @@ -1337,14 +1407,11 @@ void ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname, * * Return: %true if the directory entry matches, otherwise %false. 
*/ -static inline bool ext4_match(const struct inode *parent, +static bool ext4_match(struct inode *parent, const struct ext4_filename *fname, - const struct ext4_dir_entry_2 *de) + struct ext4_dir_entry_2 *de) { struct fscrypt_name f; -#ifdef CONFIG_UNICODE - const struct qstr entry = {.name = de->name, .len = de->name_len}; -#endif if (!de->inode) return false; @@ -1360,10 +1427,19 @@ static inline bool ext4_match(const struct inode *parent, if (fname->cf_name.name) { struct qstr cf = {.name = fname->cf_name.name, .len = fname->cf_name.len}; - return !ext4_ci_compare(parent, &cf, &entry, true); + if (IS_ENCRYPTED(parent)) { + if (fname->hinfo.hash != EXT4_DIRENT_HASH(de) || + fname->hinfo.minor_hash != + EXT4_DIRENT_MINOR_HASH(de)) { + + return 0; + } + } + return !ext4_ci_compare(parent, &cf, de->name, + de->name_len, true); } - return !ext4_ci_compare(parent, fname->usr_fname, &entry, - false); + return !ext4_ci_compare(parent, fname->usr_fname, de->name, + de->name_len, false); } #endif @@ -1375,7 +1451,8 @@ static inline bool ext4_match(const struct inode *parent, */ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, struct inode *dir, struct ext4_filename *fname, - unsigned int offset, struct ext4_dir_entry_2 **res_dir) + ext4_lblk_t lblk, unsigned int offset, + struct ext4_dir_entry_2 **res_dir) { struct ext4_dir_entry_2 * de; char * dlimit; @@ -1391,7 +1468,7 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, /* found a match - just to be sure, do * a full check */ if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, - bh->b_size, offset)) + bh->b_size, lblk, offset)) return -1; *res_dir = de; return 1; @@ -1437,7 +1514,7 @@ static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block, static struct buffer_head *__ext4_find_entry(struct inode *dir, struct ext4_filename *fname, struct ext4_dir_entry_2 **res_dir, - int *inlined) + int *inlined, ext4_lblk_t *lblk) { struct super_block *sb; struct buffer_head *bh_use[NAMEI_RA_SIZE]; @@ -1461,6 +1538,8 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir, int has_inline_data = 1; ret = ext4_find_inline_entry(dir, fname, res_dir, &has_inline_data); + if (lblk) + *lblk = 0; if (has_inline_data) { if (inlined) *inlined = 1; @@ -1479,7 +1558,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir, goto restart; } if (is_dx(dir)) { - ret = ext4_dx_find_entry(dir, fname, res_dir); + ret = ext4_dx_find_entry(dir, fname, res_dir, lblk); /* * On success, or if the error was file not found, * return. 
Otherwise, fall back to doing a search the @@ -1544,9 +1623,11 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir, goto cleanup_and_exit; } set_buffer_verified(bh); - i = search_dirblock(bh, dir, fname, + i = search_dirblock(bh, dir, fname, block, block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); if (i == 1) { + if (lblk) + *lblk = block; EXT4_I(dir)->i_dir_start_lookup = block; ret = bh; goto cleanup_and_exit; @@ -1581,7 +1662,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir, static struct buffer_head *ext4_find_entry(struct inode *dir, const struct qstr *d_name, struct ext4_dir_entry_2 **res_dir, - int *inlined) + int *inlined, ext4_lblk_t *lblk) { int err; struct ext4_filename fname; @@ -1593,7 +1674,7 @@ static struct buffer_head *ext4_find_entry(struct inode *dir, if (err) return ERR_PTR(err); - bh = __ext4_find_entry(dir, &fname, res_dir, inlined); + bh = __ext4_find_entry(dir, &fname, res_dir, inlined, lblk); ext4_fname_free_filename(&fname); return bh; @@ -1614,7 +1695,7 @@ static struct buffer_head *ext4_lookup_entry(struct inode *dir, if (err) return ERR_PTR(err); - bh = __ext4_find_entry(dir, &fname, res_dir, NULL); + bh = __ext4_find_entry(dir, &fname, res_dir, NULL, NULL); ext4_fname_free_filename(&fname); return bh; @@ -1622,7 +1703,7 @@ static struct buffer_head *ext4_lookup_entry(struct inode *dir, static struct buffer_head * ext4_dx_find_entry(struct inode *dir, struct ext4_filename *fname, - struct ext4_dir_entry_2 **res_dir) + struct ext4_dir_entry_2 **res_dir, ext4_lblk_t *lblk) { struct super_block * sb = dir->i_sb; struct dx_frame frames[EXT4_HTREE_LEVEL], *frame; @@ -1638,11 +1719,13 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, return (struct buffer_head *) frame; do { block = dx_get_block(frame->at); + if (lblk) + *lblk = block; bh = ext4_read_dirblock(dir, block, DIRENT_HTREE); if (IS_ERR(bh)) goto errout; - retval = search_dirblock(bh, dir, fname, + retval = search_dirblock(bh, dir, fname, block, block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); if (retval == 1) @@ -1737,7 +1820,7 @@ struct dentry *ext4_get_parent(struct dentry *child) struct ext4_dir_entry_2 * de; struct buffer_head *bh; - bh = ext4_find_entry(d_inode(child), &dotdot, &de, NULL); + bh = ext4_find_entry(d_inode(child), &dotdot, &de, NULL, NULL); if (IS_ERR(bh)) return (struct dentry *) bh; if (!bh) @@ -1759,7 +1842,8 @@ struct dentry *ext4_get_parent(struct dentry *child) * Returns pointer to last entry moved. */ static struct ext4_dir_entry_2 * -dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count, +dx_move_dirents(struct inode *dir, char *from, char *to, + struct dx_map_entry *map, int count, unsigned blocksize) { unsigned rec_len = 0; @@ -1767,7 +1851,8 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count, while (count--) { struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) (from + (map->offs<<2)); - rec_len = EXT4_DIR_REC_LEN(de->name_len); + rec_len = ext4_dir_rec_len(de->name_len, dir); + memcpy (to, de, rec_len); ((struct ext4_dir_entry_2 *) to)->rec_len = ext4_rec_len_to_disk(rec_len, blocksize); @@ -1782,7 +1867,8 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count, * Compact each dir entry in the range to the minimal rec_len. * Returns pointer to last entry in range. 
*/ -static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize) +static struct ext4_dir_entry_2 *dx_pack_dirents(struct inode *dir, char *base, + unsigned int blocksize) { struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base; unsigned rec_len = 0; @@ -1791,7 +1877,7 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize) while ((char*)de < base + blocksize) { next = ext4_next_entry(de, blocksize); if (de->inode && de->name_len) { - rec_len = EXT4_DIR_REC_LEN(de->name_len); + rec_len = ext4_dir_rec_len(de->name_len, dir); if (de > to) memmove(to, de, rec_len); to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize); @@ -1809,13 +1895,12 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize) * Returns pointer to de in block into which the new entry will be inserted. */ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, - struct buffer_head **bh,struct dx_frame *frame, - struct dx_hash_info *hinfo) + struct buffer_head **bh, struct dx_frame *frame, + struct dx_hash_info *hinfo, ext4_lblk_t *newblock) { unsigned blocksize = dir->i_sb->s_blocksize; unsigned count, continued; struct buffer_head *bh2; - ext4_lblk_t newblock; u32 hash2; struct dx_map_entry *map; char *data1 = (*bh)->b_data, *data2; @@ -1828,7 +1913,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, if (ext4_has_metadata_csum(dir->i_sb)) csum_size = sizeof(struct ext4_dir_entry_tail); - bh2 = ext4_append(handle, dir, &newblock); + bh2 = ext4_append(handle, dir, newblock); if (IS_ERR(bh2)) { brelse(*bh); *bh = NULL; @@ -1872,9 +1957,9 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, hash2, split, count-split)); /* Fancy dance to stay within two buffers */ - de2 = dx_move_dirents(data1, data2, map + split, count - split, + de2 = dx_move_dirents(dir, data1, data2, map + split, count - split, blocksize); - de = dx_pack_dirents(data1, blocksize); + de = dx_pack_dirents(dir, data1, blocksize); de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) - (char *) de, blocksize); @@ -1899,7 +1984,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, swap(*bh, bh2); de = de2; } - dx_insert_block(frame, hash2 + continued, newblock); + dx_insert_block(frame, hash2 + continued, *newblock); err = ext4_handle_dirty_dirent_node(handle, dir, bh2); if (err) goto journal_error; @@ -1919,13 +2004,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, } int ext4_find_dest_de(struct inode *dir, struct inode *inode, + ext4_lblk_t lblk, struct buffer_head *bh, void *buf, int buf_size, struct ext4_filename *fname, struct ext4_dir_entry_2 **dest_de) { struct ext4_dir_entry_2 *de; - unsigned short reclen = EXT4_DIR_REC_LEN(fname_len(fname)); + unsigned short reclen = ext4_dir_rec_len(fname_len(fname), dir); int nlen, rlen; unsigned int offset = 0; char *top; @@ -1934,11 +2020,11 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode, top = buf + buf_size - reclen; while ((char *) de <= top) { if (ext4_check_dir_entry(dir, NULL, de, bh, - buf, buf_size, offset)) + buf, buf_size, lblk, offset)) return -EFSCORRUPTED; if (ext4_match(dir, fname, de)) return -EEXIST; - nlen = EXT4_DIR_REC_LEN(de->name_len); + nlen = ext4_dir_rec_len(de->name_len, dir); rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); if ((de->inode ? 
rlen - nlen : rlen) >= reclen) break; @@ -1952,7 +2038,8 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode, return 0; } -void ext4_insert_dentry(struct inode *inode, +void ext4_insert_dentry(struct inode *dir, + struct inode *inode, struct ext4_dir_entry_2 *de, int buf_size, struct ext4_filename *fname) @@ -1960,7 +2047,7 @@ void ext4_insert_dentry(struct inode *inode, int nlen, rlen; - nlen = EXT4_DIR_REC_LEN(de->name_len); + nlen = ext4_dir_rec_len(de->name_len, dir); rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); if (de->inode) { struct ext4_dir_entry_2 *de1 = @@ -1974,6 +2061,13 @@ void ext4_insert_dentry(struct inode *inode, ext4_set_de_type(inode->i_sb, de, inode->i_mode); de->name_len = fname_len(fname); memcpy(de->name, fname_name(fname), fname_len(fname)); + if (ext4_hash_in_dirent(dir)) { + struct dx_hash_info *hinfo = &fname->hinfo; + + EXT4_DIRENT_HASHES(de)->hash = cpu_to_le32(hinfo->hash); + EXT4_DIRENT_HASHES(de)->minor_hash = + cpu_to_le32(hinfo->minor_hash); + } } /* @@ -1987,6 +2081,7 @@ void ext4_insert_dentry(struct inode *inode, static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname, struct inode *dir, struct inode *inode, struct ext4_dir_entry_2 *de, + ext4_lblk_t blk, struct buffer_head *bh) { unsigned int blocksize = dir->i_sb->s_blocksize; @@ -1997,7 +2092,7 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname, csum_size = sizeof(struct ext4_dir_entry_tail); if (!de) { - err = ext4_find_dest_de(dir, inode, bh, bh->b_data, + err = ext4_find_dest_de(dir, inode, blk, bh, bh->b_data, blocksize - csum_size, fname, &de); if (err) return err; @@ -2010,7 +2105,7 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname, } /* By now the buffer is marked for journaling */ - ext4_insert_dentry(inode, de, blocksize, fname); + ext4_insert_dentry(dir, inode, de, blocksize, fname); /* * XXX shouldn't update any times until successful @@ -2106,11 +2201,16 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname, /* Initialize the root; the dot dirents already exist */ de = (struct ext4_dir_entry_2 *) (&root->dotdot); - de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2), - blocksize); + de->rec_len = ext4_rec_len_to_disk( + blocksize - ext4_dir_rec_len(2, NULL), blocksize); memset (&root->info, 0, sizeof(root->info)); root->info.info_length = sizeof(root->info); - root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; + if (ext4_hash_in_dirent(dir)) + root->info.hash_version = DX_HASH_SIPHASH; + else + root->info.hash_version = + EXT4_SB(dir->i_sb)->s_def_hash_version; + entries = root->entries; dx_set_block(entries, 1); dx_set_count(entries, 1); @@ -2121,7 +2221,11 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname, if (fname->hinfo.hash_version <= DX_HASH_TEA) fname->hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; fname->hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; - ext4fs_dirhash(dir, fname_name(fname), fname_len(fname), &fname->hinfo); + + /* casefolded encrypted hashes are computed on fname setup */ + if (!ext4_hash_in_dirent(dir)) + ext4fs_dirhash(dir, fname_name(fname), + fname_len(fname), &fname->hinfo); memset(frames, 0, sizeof(frames)); frame = frames; @@ -2136,13 +2240,13 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname, if (retval) goto out_frames; - de = do_split(handle,dir, &bh2, frame, &fname->hinfo); + de = do_split(handle, dir, &bh2, frame, &fname->hinfo, &block); if 
(IS_ERR(de)) { retval = PTR_ERR(de); goto out_frames; } - retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh2); + retval = add_dirent_to_buf(handle, fname, dir, inode, de, block, bh2); out_frames: /* * Even if the block split failed, we have to properly write @@ -2238,7 +2342,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, goto out; } retval = add_dirent_to_buf(handle, &fname, dir, inode, - NULL, bh); + NULL, block, bh); if (retval != -ENOSPC) goto out; @@ -2267,7 +2371,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, initialize_dirent_tail(t, blocksize); } - retval = add_dirent_to_buf(handle, &fname, dir, inode, de, bh); + retval = add_dirent_to_buf(handle, &fname, dir, inode, de, block, bh); out: ext4_fname_free_filename(&fname); brelse(bh); @@ -2289,6 +2393,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname, struct ext4_dir_entry_2 *de; int restart; int err; + ext4_lblk_t lblk; again: restart = 0; @@ -2297,7 +2402,8 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname, return PTR_ERR(frame); entries = frame->entries; at = frame->at; - bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT_HTREE); + lblk = dx_get_block(frame->at); + bh = ext4_read_dirblock(dir, lblk, DIRENT_HTREE); if (IS_ERR(bh)) { err = PTR_ERR(bh); bh = NULL; @@ -2309,7 +2415,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname, if (err) goto journal_error; - err = add_dirent_to_buf(handle, fname, dir, inode, NULL, bh); + err = add_dirent_to_buf(handle, fname, dir, inode, NULL, lblk, bh); if (err != -ENOSPC) goto cleanup; @@ -2429,12 +2535,12 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname, goto journal_error; } } - de = do_split(handle, dir, &bh, frame, &fname->hinfo); + de = do_split(handle, dir, &bh, frame, &fname->hinfo, &lblk); if (IS_ERR(de)) { err = PTR_ERR(de); goto cleanup; } - err = add_dirent_to_buf(handle, fname, dir, inode, de, bh); + err = add_dirent_to_buf(handle, fname, dir, inode, de, lblk, bh); goto cleanup; journal_error: @@ -2457,6 +2563,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname, int ext4_generic_delete_entry(handle_t *handle, struct inode *dir, struct ext4_dir_entry_2 *de_del, + ext4_lblk_t lblk, struct buffer_head *bh, void *entry_buf, int buf_size, @@ -2471,7 +2578,7 @@ int ext4_generic_delete_entry(handle_t *handle, de = (struct ext4_dir_entry_2 *)entry_buf; while (i < buf_size - csum_size) { if (ext4_check_dir_entry(dir, NULL, de, bh, - bh->b_data, bh->b_size, i)) + bh->b_data, bh->b_size, lblk, i)) return -EFSCORRUPTED; if (de == de_del) { if (pde) @@ -2496,6 +2603,7 @@ int ext4_generic_delete_entry(handle_t *handle, static int ext4_delete_entry(handle_t *handle, struct inode *dir, struct ext4_dir_entry_2 *de_del, + ext4_lblk_t lblk, struct buffer_head *bh) { int err, csum_size = 0; @@ -2516,7 +2624,7 @@ static int ext4_delete_entry(handle_t *handle, if (unlikely(err)) goto out; - err = ext4_generic_delete_entry(handle, dir, de_del, + err = ext4_generic_delete_entry(handle, dir, de_del, lblk, bh, bh->b_data, dir->i_sb->s_blocksize, csum_size); if (err) @@ -2699,7 +2807,7 @@ struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode, { de->inode = cpu_to_le32(inode->i_ino); de->name_len = 1; - de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len), + de->rec_len = ext4_rec_len_to_disk(ext4_dir_rec_len(de->name_len, NULL), blocksize); strcpy(de->name, "."); 
ext4_set_de_type(inode->i_sb, de, S_IFDIR); @@ -2709,11 +2817,12 @@ struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode, de->name_len = 2; if (!dotdot_real_len) de->rec_len = ext4_rec_len_to_disk(blocksize - - (csum_size + EXT4_DIR_REC_LEN(1)), + (csum_size + ext4_dir_rec_len(1, NULL)), blocksize); else de->rec_len = ext4_rec_len_to_disk( - EXT4_DIR_REC_LEN(de->name_len), blocksize); + ext4_dir_rec_len(de->name_len, NULL), + blocksize); strcpy(de->name, ".."); ext4_set_de_type(inode->i_sb, de, S_IFDIR); @@ -2841,7 +2950,8 @@ bool ext4_empty_dir(struct inode *inode) } sb = inode->i_sb; - if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) { + if (inode->i_size < ext4_dir_rec_len(1, NULL) + + ext4_dir_rec_len(2, NULL)) { EXT4_ERROR_INODE(inode, "invalid size"); return true; } @@ -2853,7 +2963,7 @@ bool ext4_empty_dir(struct inode *inode) return true; de = (struct ext4_dir_entry_2 *) bh->b_data; - if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, + if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, 0, 0) || le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) { ext4_warning_inode(inode, "directory missing '.'"); @@ -2862,7 +2972,7 @@ bool ext4_empty_dir(struct inode *inode) } offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); de = ext4_next_entry(de, sb->s_blocksize); - if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, + if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, 0, offset) || le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) { ext4_warning_inode(inode, "directory missing '..'"); @@ -2886,7 +2996,7 @@ bool ext4_empty_dir(struct inode *inode) de = (struct ext4_dir_entry_2 *) (bh->b_data + (offset & (sb->s_blocksize - 1))); if (ext4_check_dir_entry(inode, NULL, de, bh, - bh->b_data, bh->b_size, offset)) { + bh->b_data, bh->b_size, 0, offset)) { offset = (offset | (sb->s_blocksize - 1)) + 1; continue; } @@ -3081,6 +3191,8 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) struct buffer_head *bh; struct ext4_dir_entry_2 *de; handle_t *handle = NULL; + ext4_lblk_t lblk; + if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb)))) return -EIO; @@ -3095,7 +3207,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) return retval; retval = -ENOENT; - bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); + bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL, &lblk); if (IS_ERR(bh)) return PTR_ERR(bh); if (!bh) @@ -3122,7 +3234,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); - retval = ext4_delete_entry(handle, dir, de, bh); + retval = ext4_delete_entry(handle, dir, de, lblk, bh); if (retval) goto end_rmdir; if (!EXT4_DIR_LINK_EMPTY(inode)) @@ -3168,6 +3280,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) struct buffer_head *bh; struct ext4_dir_entry_2 *de; handle_t *handle = NULL; + ext4_lblk_t lblk; if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb)))) return -EIO; @@ -3183,7 +3296,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) return retval; retval = -ENOENT; - bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); + bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL, &lblk); if (IS_ERR(bh)) return PTR_ERR(bh); if (!bh) @@ -3206,7 +3319,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); - retval = ext4_delete_entry(handle, dir, de, bh); + retval = 
ext4_delete_entry(handle, dir, de, lblk, bh); if (retval) goto end_unlink; dir->i_ctime = dir->i_mtime = current_time(dir); @@ -3468,6 +3581,7 @@ struct ext4_renament { int dir_nlink_delta; /* entry for "dentry" */ + ext4_lblk_t lblk; struct buffer_head *bh; struct ext4_dir_entry_2 *de; int inlined; @@ -3557,12 +3671,13 @@ static int ext4_find_delete_entry(handle_t *handle, struct inode *dir, int retval = -ENOENT; struct buffer_head *bh; struct ext4_dir_entry_2 *de; + ext4_lblk_t lblk; - bh = ext4_find_entry(dir, d_name, &de, NULL); + bh = ext4_find_entry(dir, d_name, &de, NULL, &lblk); if (IS_ERR(bh)) return PTR_ERR(bh); if (bh) { - retval = ext4_delete_entry(handle, dir, de, bh); + retval = ext4_delete_entry(handle, dir, de, lblk, bh); brelse(bh); } return retval; @@ -3586,7 +3701,8 @@ static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent, retval = ext4_find_delete_entry(handle, ent->dir, &ent->dentry->d_name); } else { - retval = ext4_delete_entry(handle, ent->dir, ent->de, ent->bh); + retval = ext4_delete_entry(handle, ent->dir, ent->de, + ent->lblk, ent->bh); if (retval == -ENOENT) { retval = ext4_find_delete_entry(handle, ent->dir, &ent->dentry->d_name); @@ -3699,7 +3815,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, return retval; } - old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL); + old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL, + &old.lblk); if (IS_ERR(old.bh)) return PTR_ERR(old.bh); /* @@ -3713,7 +3830,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, goto end_rename; new.bh = ext4_find_entry(new.dir, &new.dentry->d_name, - &new.de, &new.inlined); + &new.de, &new.inlined, NULL); if (IS_ERR(new.bh)) { retval = PTR_ERR(new.bh); new.bh = NULL; @@ -3893,7 +4010,7 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry, return retval; old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, - &old.de, &old.inlined); + &old.de, &old.inlined, NULL); if (IS_ERR(old.bh)) return PTR_ERR(old.bh); /* @@ -3907,7 +4024,7 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry, goto end_rename; new.bh = ext4_find_entry(new.dir, &new.dentry->d_name, - &new.de, &new.inlined); + &new.de, &new.inlined, NULL); if (IS_ERR(new.bh)) { retval = PTR_ERR(new.bh); new.bh = NULL; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 8731f693513699a556f1cf4c6b3d1b13e7311bd7..820d9a4d11ed6070cb079a0308348ed9624e2766 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1000,6 +1000,7 @@ static void ext4_put_super(struct super_block *sb) crypto_free_shash(sbi->s_chksum_driver); kfree(sbi->s_blockgroup_lock); fs_put_dax(sbi->s_daxdev); + fscrypt_free_dummy_context(&sbi->s_dummy_enc_ctx); #ifdef CONFIG_UNICODE utf8_unload(sb->s_encoding); #endif @@ -1283,9 +1284,10 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len, return res; } -static bool ext4_dummy_context(struct inode *inode) +static const union fscrypt_context * +ext4_get_dummy_context(struct super_block *sb) { - return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb)); + return EXT4_SB(sb)->s_dummy_enc_ctx.ctx; } static bool ext4_has_stable_inodes(struct super_block *sb) @@ -1309,7 +1311,7 @@ static const struct fscrypt_operations ext4_cryptops = { .key_prefix = "ext4:", .get_context = ext4_get_context, .set_context = ext4_set_context, - .dummy_context = ext4_dummy_context, + .get_dummy_context = ext4_get_dummy_context, .empty_dir = ext4_empty_dir, 
.max_namelen = EXT4_NAME_LEN, .has_stable_inodes = ext4_has_stable_inodes, @@ -1503,6 +1505,7 @@ static const match_table_t tokens = { {Opt_init_itable, "init_itable"}, {Opt_noinit_itable, "noinit_itable"}, {Opt_max_dir_size_kb, "max_dir_size_kb=%u"}, + {Opt_test_dummy_encryption, "test_dummy_encryption=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, {Opt_inlinecrypt, "inlinecrypt"}, {Opt_nombcache, "nombcache"}, @@ -1713,7 +1716,7 @@ static const struct mount_opts { {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT}, {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT}, {Opt_max_dir_size_kb, 0, MOPT_GTE0}, - {Opt_test_dummy_encryption, 0, MOPT_GTE0}, + {Opt_test_dummy_encryption, 0, MOPT_STRING}, #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT {Opt_inlinecrypt, EXT4_MOUNT_INLINECRYPT, MOPT_SET}, #else @@ -1753,6 +1756,48 @@ static int ext4_sb_read_encoding(const struct ext4_super_block *es, } #endif +static int ext4_set_test_dummy_encryption(struct super_block *sb, + const char *opt, + const substring_t *arg, + bool is_remount) +{ +#ifdef CONFIG_FS_ENCRYPTION + struct ext4_sb_info *sbi = EXT4_SB(sb); + int err; + + /* + * This mount option is just for testing, and it's not worthwhile to + * implement the extra complexity (e.g. RCU protection) that would be + * needed to allow it to be set or changed during remount. We do allow + * it to be specified during remount, but only if there is no change. + */ + if (is_remount && !sbi->s_dummy_enc_ctx.ctx) { + ext4_msg(sb, KERN_WARNING, + "Can't set test_dummy_encryption on remount"); + return -1; + } + err = fscrypt_set_test_dummy_encryption(sb, arg, &sbi->s_dummy_enc_ctx); + if (err) { + if (err == -EEXIST) + ext4_msg(sb, KERN_WARNING, + "Can't change test_dummy_encryption on remount"); + else if (err == -EINVAL) + ext4_msg(sb, KERN_WARNING, + "Value of option \"%s\" is unrecognized", opt); + else + ext4_msg(sb, KERN_WARNING, + "Error processing option \"%s\" [%d]", + opt, err); + return -1; + } + ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled"); +#else + ext4_msg(sb, KERN_WARNING, + "Test dummy encryption mount option ignored"); +#endif + return 1; +} + static int handle_mount_opt(struct super_block *sb, char *opt, int token, substring_t *args, unsigned long *journal_devnum, unsigned int *journal_ioprio, int is_remount) @@ -1942,14 +1987,8 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, *journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg); } else if (token == Opt_test_dummy_encryption) { -#ifdef CONFIG_FS_ENCRYPTION - sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION; - ext4_msg(sb, KERN_WARNING, - "Test dummy encryption mode enabled"); -#else - ext4_msg(sb, KERN_WARNING, - "Test dummy encryption mount option ignored"); -#endif + return ext4_set_test_dummy_encryption(sb, opt, &args[0], + is_remount); } else if (m->flags & MOPT_DATAJ) { if (is_remount) { if (!sbi->s_journal) @@ -2207,8 +2246,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); if (test_opt(sb, DATA_ERR_ABORT)) SEQ_OPTS_PUTS("data_err=abort"); - if (DUMMY_ENCRYPTION_ENABLED(sbi)) - SEQ_OPTS_PUTS("test_dummy_encryption"); + + fscrypt_show_test_dummy_encryption(seq, sep, sb); ext4_show_quota_options(seq, sb); return 0; @@ -3845,12 +3884,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) struct unicode_map *encoding; __u16 encoding_flags; - if (ext4_has_feature_encrypt(sb)) { - ext4_msg(sb, KERN_ERR, - "Can't mount with encoding 
and encryption"); - goto failed_mount; - } - if (ext4_sb_read_encoding(es, &encoding_info, &encoding_flags)) { ext4_msg(sb, KERN_ERR, @@ -4688,6 +4721,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) for (i = 0; i < EXT4_MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif + fscrypt_free_dummy_context(&sbi->s_dummy_enc_ctx); ext4_blkdev_remove(sbi); brelse(bh); out_fail: diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index eca4945ed07d6edee7c0935fd23b5f018a80c503..a6282efd16ebf0be99a7d54c01091947692e7114 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -226,6 +226,7 @@ EXT4_ATTR_FEATURE(batched_discard); EXT4_ATTR_FEATURE(meta_bg_resize); #ifdef CONFIG_FS_ENCRYPTION EXT4_ATTR_FEATURE(encryption); +EXT4_ATTR_FEATURE(test_dummy_encryption_v2); #endif #ifdef CONFIG_UNICODE EXT4_ATTR_FEATURE(casefold); @@ -241,6 +242,7 @@ static struct attribute *ext4_feat_attrs[] = { ATTR_LIST(meta_bg_resize), #ifdef CONFIG_FS_ENCRYPTION ATTR_LIST(encryption), + ATTR_LIST(test_dummy_encryption_v2), #endif #ifdef CONFIG_UNICODE ATTR_LIST(casefold), diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index b0873b89dc879ff695c3dd93ca4d22a2404029f7..823b01f6b0f0a7a9d006c7f3c24df79a4fd06d32 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1823,8 +1823,11 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, if (EXT4_I(inode)->i_file_acl) { /* The inode already has an extended attribute block. */ bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); - if (IS_ERR(bs->bh)) - return PTR_ERR(bs->bh); + if (IS_ERR(bs->bh)) { + error = PTR_ERR(bs->bh); + bs->bh = NULL; + return error; + } ea_bdebug(bs->bh, "b_count=%d, refcount=%d", atomic_read(&(bs->bh->b_count)), le32_to_cpu(BHDR(bs->bh)->h_refcount)); diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h index b96823c59b15a4c62f3bd00558cbc04cd9c96791..124868c13f80fff51c6d214df8f26215b98b461e 100644 --- a/fs/f2fs/acl.h +++ b/fs/f2fs/acl.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * fs/f2fs/acl.h * diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 1b0d57b0d00cfb68d4d5784ed2a0090d9ff11cc0..f3773d66e7c0eadc6a7271b27fba401cbac15c45 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -895,8 +895,8 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) int i; int err; - sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks), - GFP_KERNEL); + sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks), + GFP_KERNEL); if (!sbi->ckpt) return -ENOMEM; /* @@ -1168,6 +1168,11 @@ static int block_operations(struct f2fs_sb_info *sbi) }; int err = 0, cnt = 0; + /* + * Let's flush inline_data in dirty node pages. 
+ */ + f2fs_flush_inline_data(sbi); + retry_flush_quotas: f2fs_lock_all(sbi); if (__need_flush_quota(sbi)) { @@ -1261,6 +1266,9 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type) if (unlikely(f2fs_cp_error(sbi))) break; + if (type == F2FS_DIRTY_META) + f2fs_sync_meta_pages(sbi, META, LONG_MAX, + FS_CP_META_IO); io_schedule_timeout(DEFAULT_IO_TIMEOUT); } finish_wait(&sbi->cp_wait, &wait); @@ -1554,7 +1562,8 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) return 0; f2fs_warn(sbi, "Start checkpoint disabled!"); } - mutex_lock(&sbi->cp_mutex); + if (cpc->reason != CP_RESIZE) + mutex_lock(&sbi->cp_mutex); if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) && ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) || @@ -1623,7 +1632,8 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) f2fs_update_time(sbi, CP_TIME); trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); out: - mutex_unlock(&sbi->cp_mutex); + if (cpc->reason != CP_RESIZE) + mutex_unlock(&sbi->cp_mutex); return err; } diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index f5a747d8b044bbd881650dce1d96875afd609953..20e06426a681eec0efa399d611a1cf0085030750 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "f2fs.h" #include "node.h" @@ -70,15 +71,6 @@ static void f2fs_set_compressed_page(struct page *page, page->mapping = inode->i_mapping; } -static void f2fs_put_compressed_page(struct page *page) -{ - set_page_private(page, (unsigned long)NULL); - ClearPagePrivate(page); - page->mapping = NULL; - unlock_page(page); - put_page(page); -} - static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock) { int i; @@ -103,8 +95,7 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len) f2fs_drop_rpages(cc, len, true); } -static void f2fs_put_rpages_mapping(struct compress_ctx *cc, - struct address_space *mapping, +static void f2fs_put_rpages_mapping(struct address_space *mapping, pgoff_t start, int len) { int i; @@ -241,7 +232,12 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc) if (!cc->private) return -ENOMEM; - cc->clen = LZ4_compressBound(PAGE_SIZE << cc->log_cluster_size); + /* + * we do not change cc->clen to LZ4_compressBound(inputsize) to + * adapt worst compress case, because lz4 compressor can handle + * output budget properly. 
+ */ + cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE; return 0; } @@ -257,11 +253,9 @@ static int lz4_compress_pages(struct compress_ctx *cc) len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen, cc->clen, cc->private); - if (!len) { - printk_ratelimited("%sF2FS-fs (%s): lz4 compress failed\n", - KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id); - return -EIO; - } + if (!len) + return -EAGAIN; + cc->clen = len; return 0; } @@ -371,6 +365,13 @@ static int zstd_compress_pages(struct compress_ctx *cc) return -EIO; } + /* + * there is compressed data remained in intermediate buffer due to + * no more space in cbuf.cdata + */ + if (ret) + return -EAGAIN; + cc->clen = outbuf.pos; return 0; } @@ -481,17 +482,47 @@ bool f2fs_is_compress_backend_ready(struct inode *inode) return f2fs_cops[F2FS_I(inode)->i_compress_algorithm]; } -static struct page *f2fs_grab_page(void) +static mempool_t *compress_page_pool = NULL; +static int num_compress_pages = 512; +module_param(num_compress_pages, uint, 0444); +MODULE_PARM_DESC(num_compress_pages, + "Number of intermediate compress pages to preallocate"); + +int f2fs_init_compress_mempool(void) +{ + compress_page_pool = mempool_create_page_pool(num_compress_pages, 0); + if (!compress_page_pool) + return -ENOMEM; + + return 0; +} + +void f2fs_destroy_compress_mempool(void) +{ + mempool_destroy(compress_page_pool); +} + +static struct page *f2fs_compress_alloc_page(void) { struct page *page; - page = alloc_page(GFP_NOFS); - if (!page) - return NULL; + page = mempool_alloc(compress_page_pool, GFP_NOFS); lock_page(page); + return page; } +static void f2fs_compress_free_page(struct page *page) +{ + if (!page) + return; + set_page_private(page, (unsigned long)NULL); + ClearPagePrivate(page); + page->mapping = NULL; + unlock_page(page); + mempool_free(page, compress_page_pool); +} + static int f2fs_compress_pages(struct compress_ctx *cc) { struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode); @@ -521,7 +552,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc) } for (i = 0; i < cc->nr_cpages; i++) { - cc->cpages[i] = f2fs_grab_page(); + cc->cpages[i] = f2fs_compress_alloc_page(); if (!cc->cpages[i]) { ret = -ENOMEM; goto out_free_cpages; @@ -566,7 +597,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc) vunmap(cc->rbuf); for (i = nr_cpages; i < cc->nr_cpages; i++) { - f2fs_put_compressed_page(cc->cpages[i]); + f2fs_compress_free_page(cc->cpages[i]); cc->cpages[i] = NULL; } @@ -586,7 +617,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc) out_free_cpages: for (i = 0; i < cc->nr_cpages; i++) { if (cc->cpages[i]) - f2fs_put_compressed_page(cc->cpages[i]); + f2fs_compress_free_page(cc->cpages[i]); } kfree(cc->cpages); cc->cpages = NULL; @@ -793,6 +824,8 @@ static bool cluster_may_compress(struct compress_ctx *cc) return false; if (!f2fs_cluster_is_full(cc)) return false; + if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode)))) + return false; return __cluster_may_compress(cc); } @@ -884,7 +917,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, if (!PageUptodate(page)) { f2fs_unlock_rpages(cc, i + 1); - f2fs_put_rpages_mapping(cc, mapping, start_idx, + f2fs_put_rpages_mapping(mapping, start_idx, cc->cluster_size); f2fs_destroy_compress_ctx(cc); goto retry; @@ -919,7 +952,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, unlock_pages: f2fs_unlock_rpages(cc, i); release_pages: - f2fs_put_rpages_mapping(cc, mapping, start_idx, i); + f2fs_put_rpages_mapping(mapping, start_idx, i); f2fs_destroy_compress_ctx(cc); 
return ret; } @@ -959,6 +992,55 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata, return first_index; } +int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock) +{ + void *fsdata = NULL; + struct page *pagep; + int log_cluster_size = F2FS_I(inode)->i_log_cluster_size; + pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) << + log_cluster_size; + int err; + + err = f2fs_is_compressed_cluster(inode, start_idx); + if (err < 0) + return err; + + /* truncate normal cluster */ + if (!err) + return f2fs_do_truncate_blocks(inode, from, lock); + + /* truncate compressed cluster */ + err = f2fs_prepare_compress_overwrite(inode, &pagep, + start_idx, &fsdata); + + /* should not be a normal cluster */ + f2fs_bug_on(F2FS_I_SB(inode), err == 0); + + if (err <= 0) + return err; + + if (err > 0) { + struct page **rpages = fsdata; + int cluster_size = F2FS_I(inode)->i_cluster_size; + int i; + + for (i = cluster_size - 1; i >= 0; i--) { + loff_t start = rpages[i]->index << PAGE_SHIFT; + + if (from <= start) { + zero_user_segment(rpages[i], 0, PAGE_SIZE); + } else { + zero_user_segment(rpages[i], from - start, + PAGE_SIZE); + break; + } + } + + f2fs_compress_write_end(inode, fsdata, start_idx, true); + } + return 0; +} + static int f2fs_write_compressed_pages(struct compress_ctx *cc, int *submitted, struct writeback_control *wbc, @@ -1140,7 +1222,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page) if (unlikely(bio->bi_status)) mapping_set_error(cic->inode->i_mapping, -EIO); - f2fs_put_compressed_page(page); + f2fs_compress_free_page(page); dec_page_count(sbi, F2FS_WB_DATA); @@ -1301,7 +1383,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) for (i = 0; i < dic->nr_cpages; i++) { struct page *page; - page = f2fs_grab_page(); + page = f2fs_compress_alloc_page(); if (!page) goto out_free; @@ -1321,7 +1403,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) continue; } - dic->tpages[i] = f2fs_grab_page(); + dic->tpages[i] = f2fs_compress_alloc_page(); if (!dic->tpages[i]) goto out_free; } @@ -1343,8 +1425,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic) continue; if (!dic->tpages[i]) continue; - unlock_page(dic->tpages[i]); - put_page(dic->tpages[i]); + f2fs_compress_free_page(dic->tpages[i]); } kfree(dic->tpages); } @@ -1353,7 +1434,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic) for (i = 0; i < dic->nr_cpages; i++) { if (!dic->cpages[i]) continue; - f2fs_put_compressed_page(dic->cpages[i]); + f2fs_compress_free_page(dic->cpages[i]); } kfree(dic->cpages); } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 19abef392da371e1fa01cefb9cb8afac49611e34..4237025fd1f896813cea749fe160c344e060b22c 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -116,7 +116,8 @@ static enum count_type __read_io_type(struct page *page) /* postprocessing steps for read bios */ enum bio_post_read_step { STEP_DECRYPT, - STEP_DECOMPRESS, + STEP_DECOMPRESS_NOWQ, /* handle normal cluster data inplace */ + STEP_DECOMPRESS, /* handle compressed cluster data in workqueue */ STEP_VERITY, }; @@ -580,22 +581,28 @@ void f2fs_submit_bio(struct f2fs_sb_info *sbi, __submit_bio(sbi, bio, type); } -static void __attach_data_io_flag(struct f2fs_io_info *fio) +static void __attach_io_flag(struct f2fs_io_info *fio) { struct f2fs_sb_info *sbi = fio->sbi; unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1; - unsigned int fua_flag = sbi->data_io_flag & temp_mask; - unsigned int meta_flag = (sbi->data_io_flag >> NR_TEMP_TYPE) & - temp_mask; + 
unsigned int io_flag, fua_flag, meta_flag; + + if (fio->type == DATA) + io_flag = sbi->data_io_flag; + else if (fio->type == NODE) + io_flag = sbi->node_io_flag; + else + return; + + fua_flag = io_flag & temp_mask; + meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask; + /* - * data io flag bits per temp: + * data/node io flag bits per temp: * REQ_META | REQ_FUA | * 5 | 4 | 3 | 2 | 1 | 0 | * Cold | Warm | Hot | Cold | Warm | Hot | */ - if (fio->type != DATA) - return; - if ((1 << fio->temp) & meta_flag) fio->op_flags |= REQ_META; if ((1 << fio->temp) & fua_flag) @@ -609,7 +616,7 @@ static void __submit_merged_bio(struct f2fs_bio_info *io) if (!io->bio) return; - __attach_data_io_flag(fio); + __attach_io_flag(fio); bio_set_op_attrs(io->bio, fio->op, fio->op_flags); if (is_read_io(fio->op)) @@ -754,6 +761,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) if (fio->io_wbc && !is_read_io(fio->op)) wbc_account_io(fio->io_wbc, page, PAGE_SIZE); + __attach_io_flag(fio); bio_set_op_attrs(bio, fio->op, fio->op_flags); inc_page_count(fio->sbi, is_read_io(fio->op) ? @@ -827,9 +835,10 @@ static void del_bio_entry(struct bio_entry *be) kmem_cache_free(bio_entry_slab, be); } -static int add_ipu_page(struct f2fs_sb_info *sbi, struct bio **bio, +static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio, struct page *page) { + struct f2fs_sb_info *sbi = fio->sbi; enum temp_type temp; bool found = false; int ret = -EAGAIN; @@ -846,13 +855,18 @@ static int add_ipu_page(struct f2fs_sb_info *sbi, struct bio **bio, found = true; - if (bio_add_page(*bio, page, PAGE_SIZE, 0) == - PAGE_SIZE) { + if (page_is_mergeable(sbi, *bio, *fio->last_block, + fio->new_blkaddr) && + f2fs_crypt_mergeable_bio(*bio, + fio->page->mapping->host, + fio->page->index, fio) && + bio_add_page(*bio, page, PAGE_SIZE, 0) == + PAGE_SIZE) { ret = 0; break; } - /* bio is full */ + /* page can't be merged into bio; submit the bio */ del_bio_entry(be); __submit_bio(sbi, *bio, DATA); break; @@ -937,21 +951,17 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio) trace_f2fs_submit_page_bio(page, fio); f2fs_trace_ios(fio, 0); - if (bio && (!page_is_mergeable(fio->sbi, bio, *fio->last_block, - fio->new_blkaddr) || - !f2fs_crypt_mergeable_bio(bio, fio->page->mapping->host, - fio->page->index, fio))) - f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL); alloc_new: if (!bio) { bio = __bio_alloc(fio, BIO_MAX_PAGES); f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, fio->page->index, fio, GFP_NOIO); + __attach_io_flag(fio); bio_set_op_attrs(bio, fio->op, fio->op_flags); add_bio_entry(fio->sbi, bio, page, fio->temp); } else { - if (add_ipu_page(fio->sbi, &bio, page)) + if (add_ipu_page(fio, &bio, page)) goto alloc_new; } @@ -1076,7 +1086,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, if (fscrypt_inode_uses_fs_layer_crypto(inode)) post_read_steps |= 1 << STEP_DECRYPT; if (f2fs_compressed_file(inode)) - post_read_steps |= 1 << STEP_DECOMPRESS; + post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ; if (f2fs_need_verity(inode, first_idx)) post_read_steps |= 1 << STEP_VERITY; @@ -1919,6 +1929,25 @@ static int f2fs_xattr_fiemap(struct inode *inode, return (err < 0 ? 
err : 0); } +static loff_t max_inode_blocks(struct inode *inode) +{ + loff_t result = ADDRS_PER_INODE(inode); + loff_t leaf_count = ADDRS_PER_BLOCK(inode); + + /* two direct node blocks */ + result += (leaf_count * 2); + + /* two indirect node blocks */ + leaf_count *= NIDS_PER_BLOCK; + result += (leaf_count * 2); + + /* one double indirect node block */ + leaf_count *= NIDS_PER_BLOCK; + result += leaf_count; + + return result; +} + int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) { @@ -1928,6 +1957,8 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 logical = 0, phys = 0, size = 0; u32 flags = 0; int ret = 0; + bool compr_cluster = false; + unsigned int cluster_size = F2FS_I(inode)->i_cluster_size; if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { ret = f2fs_precache_extents(inode); @@ -1962,6 +1993,9 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, memset(&map_bh, 0, sizeof(struct buffer_head)); map_bh.b_size = len; + if (compr_cluster) + map_bh.b_size = blk_to_logical(inode, cluster_size - 1); + ret = get_data_block(inode, start_blk, &map_bh, 0, F2FS_GET_BLOCK_FIEMAP, &next_pgofs); if (ret) @@ -1972,7 +2006,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, start_blk = next_pgofs; if (blk_to_logical(inode, start_blk) < blk_to_logical(inode, - F2FS_I_SB(inode)->max_file_blocks)) + max_inode_blocks(inode))) goto prep_next; flags |= FIEMAP_EXTENT_LAST; @@ -1984,11 +2018,38 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, ret = fiemap_fill_next_extent(fieinfo, logical, phys, size, flags); + if (ret) + goto out; + size = 0; } - if (start_blk > last_blk || ret) + if (start_blk > last_blk) goto out; + if (compr_cluster) { + compr_cluster = false; + + + logical = blk_to_logical(inode, start_blk - 1); + phys = blk_to_logical(inode, map_bh.b_blocknr); + size = blk_to_logical(inode, cluster_size); + + flags |= FIEMAP_EXTENT_ENCODED; + + start_blk += cluster_size - 1; + + if (start_blk > last_blk) + goto out; + + goto prep_next; + } + + if (map_bh.b_blocknr == COMPRESS_ADDR) { + compr_cluster = true; + start_blk++; + goto prep_next; + } + logical = blk_to_logical(inode, start_blk); phys = blk_to_logical(inode, map_bh.b_blocknr); size = map_bh.b_size; @@ -2226,6 +2287,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, for (i = 0; i < dic->nr_cpages; i++) { struct page *page = dic->cpages[i]; block_t blkaddr; + struct bio_post_read_ctx *ctx; blkaddr = data_blkaddr(dn.inode, dn.node_page, dn.ofs_in_node + i + 1); @@ -2244,16 +2306,16 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, page->index, for_write); if (IS_ERR(bio)) { ret = PTR_ERR(bio); - bio = NULL; dic->failed = true; if (refcount_sub_and_test(dic->nr_cpages - i, - &dic->ref)) + &dic->ref)) { f2fs_decompress_end_io(dic->rpages, cc->cluster_size, true, false); - f2fs_free_dic(dic); + f2fs_free_dic(dic); + } f2fs_put_dnode(&dn); - *bio_ret = bio; + *bio_ret = NULL; return ret; } } @@ -2263,8 +2325,14 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, if (bio_add_page(bio, page, blocksize, 0) < blocksize) goto submit_and_realloc; + /* tag STEP_DECOMPRESS to handle IO in wq */ + ctx = bio->bi_private; + if (!(ctx->enabled_steps & (1 << STEP_DECOMPRESS))) + ctx->enabled_steps |= 1 << STEP_DECOMPRESS; + inc_page_count(sbi, F2FS_RD_DATA); f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); + f2fs_update_iostat(sbi, 
FS_CDATA_READ_IO, F2FS_BLKSIZE); ClearPageError(page); *last_block_in_bio = blkaddr; } @@ -2894,7 +2962,6 @@ static int f2fs_write_cache_pages(struct address_space *mapping, pgoff_t index; pgoff_t end; /* Inclusive */ pgoff_t done_index; - int cycled; int range_whole = 0; int tag; int nwritten = 0; @@ -2912,17 +2979,12 @@ static int f2fs_write_cache_pages(struct address_space *mapping, if (wbc->range_cyclic) { writeback_index = mapping->writeback_index; /* prev offset */ index = writeback_index; - if (index == 0) - cycled = 1; - else - cycled = 0; end = -1; } else { index = wbc->range_start >> PAGE_SHIFT; end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; - cycled = 1; /* ignore range_cyclic tests */ } if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag = PAGECACHE_TAG_TOWRITE; @@ -3087,12 +3149,13 @@ static int f2fs_write_cache_pages(struct address_space *mapping, } } #endif - if ((!cycled && !done) || retry) { - cycled = 1; + if (retry) { index = 0; - end = writeback_index - 1; + end = -1; goto retry; } + if (wbc->range_cyclic && !done) + done_index = 0; if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = done_index; @@ -3746,6 +3809,37 @@ static int f2fs_set_data_page_dirty(struct page *page) return 0; } + +static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block) +{ +#ifdef CONFIG_F2FS_FS_COMPRESSION + struct dnode_of_data dn; + sector_t start_idx, blknr = 0; + int ret; + + start_idx = round_down(block, F2FS_I(inode)->i_cluster_size); + + set_new_dnode(&dn, inode, NULL, NULL, 0); + ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE); + if (ret) + return 0; + + if (dn.data_blkaddr != COMPRESS_ADDR) { + dn.ofs_in_node += block - start_idx; + blknr = f2fs_data_blkaddr(&dn); + if (!__is_valid_data_blkaddr(blknr)) + blknr = 0; + } + + f2fs_put_dnode(&dn); + + return blknr; +#else + return -EOPNOTSUPP; +#endif +} + + static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) { struct inode *inode = mapping->host; @@ -3757,6 +3851,9 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) filemap_write_and_wait(mapping); + if (f2fs_compressed_file(inode)) + return f2fs_bmap_compress(inode, block); + return generic_block_bmap(mapping, block, get_data_block_bmap); } diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 7fd0b08d75189aaa69795f2f67fa9435eb0e5185..959c3461da6a7997b7ea6fd8eb2967ce9f68a947 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -5,6 +5,7 @@ * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ */ +#include #include #include #include @@ -70,6 +71,111 @@ unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de) return DT_UNKNOWN; } +/* If @dir is casefolded, initialize @fname->cf_name from @fname->usr_fname. 
*/ +int f2fs_init_casefolded_name(const struct inode *dir, + struct f2fs_filename *fname) +{ +#ifdef CONFIG_UNICODE + struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); + + if (IS_CASEFOLDED(dir)) { + fname->cf_name.name = f2fs_kmalloc(sbi, F2FS_NAME_LEN, + GFP_NOFS); + if (!fname->cf_name.name) + return -ENOMEM; + fname->cf_name.len = utf8_casefold(sbi->sb->s_encoding, + fname->usr_fname, + fname->cf_name.name, + F2FS_NAME_LEN); + if ((int)fname->cf_name.len <= 0) { + kfree(fname->cf_name.name); + fname->cf_name.name = NULL; + if (sb_has_enc_strict_mode(dir->i_sb)) + return -EINVAL; + /* fall back to treating name as opaque byte sequence */ + } + } +#endif + return 0; +} + +static int __f2fs_setup_filename(const struct inode *dir, + const struct fscrypt_name *crypt_name, + struct f2fs_filename *fname) +{ + int err; + + memset(fname, 0, sizeof(*fname)); + + fname->usr_fname = crypt_name->usr_fname; + fname->disk_name = crypt_name->disk_name; +#ifdef CONFIG_FS_ENCRYPTION + fname->crypto_buf = crypt_name->crypto_buf; +#endif + if (crypt_name->is_ciphertext_name) { + /* hash was decoded from the no-key name */ + fname->hash = cpu_to_le32(crypt_name->hash); + } else { + err = f2fs_init_casefolded_name(dir, fname); + if (err) { + f2fs_free_filename(fname); + return err; + } + f2fs_hash_filename(dir, fname); + } + return 0; +} + +/* + * Prepare to search for @iname in @dir. This is similar to + * fscrypt_setup_filename(), but this also handles computing the casefolded name + * and the f2fs dirhash if needed, then packing all the information about this + * filename up into a 'struct f2fs_filename'. + */ +int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, + int lookup, struct f2fs_filename *fname) +{ + struct fscrypt_name crypt_name; + int err; + + err = fscrypt_setup_filename(dir, iname, lookup, &crypt_name); + if (err) + return err; + + return __f2fs_setup_filename(dir, &crypt_name, fname); +} + +/* + * Prepare to look up @dentry in @dir. This is similar to + * fscrypt_prepare_lookup(), but this also handles computing the casefolded name + * and the f2fs dirhash if needed, then packing all the information about this + * filename up into a 'struct f2fs_filename'. 
+ */ +int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, + struct f2fs_filename *fname) +{ + struct fscrypt_name crypt_name; + int err; + + err = fscrypt_prepare_lookup(dir, dentry, &crypt_name); + if (err) + return err; + + return __f2fs_setup_filename(dir, &crypt_name, fname); +} + +void f2fs_free_filename(struct f2fs_filename *fname) +{ +#ifdef CONFIG_FS_ENCRYPTION + kfree(fname->crypto_buf.name); + fname->crypto_buf.name = NULL; +#endif +#ifdef CONFIG_UNICODE + kfree(fname->cf_name.name); + fname->cf_name.name = NULL; +#endif +} + static unsigned long dir_block_index(unsigned int level, int dir_level, unsigned int idx) { @@ -84,8 +190,7 @@ static unsigned long dir_block_index(unsigned int level, static struct f2fs_dir_entry *find_in_block(struct inode *dir, struct page *dentry_page, - struct fscrypt_name *fname, - f2fs_hash_t namehash, + const struct f2fs_filename *fname, int *max_slots, struct page **res_page) { @@ -96,7 +201,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page); make_dentry_ptr_block(dir, &d, dentry_blk); - de = f2fs_find_target_dentry(fname, namehash, max_slots, &d); + de = f2fs_find_target_dentry(&d, fname, max_slots); if (de) *res_page = dentry_page; @@ -107,130 +212,79 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, /* * Test whether a case-insensitive directory entry matches the filename * being searched for. - * - * Only called for encrypted names if the key is available. - * - * Returns: 0 if the directory entry matches, more than 0 if it - * doesn't match or less than zero on error. */ -static int f2fs_ci_compare(const struct inode *parent, const struct qstr *name, - u8 *de_name, size_t de_name_len, bool quick) +static bool f2fs_match_ci_name(const struct inode *dir, const struct qstr *name, + const u8 *de_name, u32 de_name_len) { - const struct super_block *sb = parent->i_sb; + const struct super_block *sb = dir->i_sb; const struct unicode_map *um = sb->s_encoding; struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len); struct qstr entry = QSTR_INIT(de_name, de_name_len); - int ret; + int res; - if (IS_ENCRYPTED(parent)) { + if (IS_ENCRYPTED(dir)) { const struct fscrypt_str encrypted_name = - FSTR_INIT(de_name, de_name_len); + FSTR_INIT((u8 *)de_name, de_name_len); + + if (WARN_ON_ONCE(!fscrypt_has_encryption_key(dir))) + return false; decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL); if (!decrypted_name.name) - return -ENOMEM; - ret = fscrypt_fname_disk_to_usr(parent, 0, 0, &encrypted_name, + return false; + res = fscrypt_fname_disk_to_usr(dir, 0, 0, &encrypted_name, &decrypted_name); - if (ret < 0) + if (res < 0) goto out; entry.name = decrypted_name.name; entry.len = decrypted_name.len; } - if (quick) - ret = utf8_strncasecmp_folded(um, name, &entry); - else - ret = utf8_strncasecmp(um, name, &entry); - if (ret < 0) { - /* Handle invalid character sequence as either an error - * or as an opaque byte sequence. + res = utf8_strncasecmp_folded(um, name, &entry); + if (res < 0) { + /* + * In strict mode, ignore invalid names. In non-strict mode, + * fall back to treating them as opaque byte sequences. 
*/ - if (sb_has_enc_strict_mode(sb)) - ret = -EINVAL; - else if (name->len != entry.len) - ret = 1; + if (sb_has_enc_strict_mode(sb) || name->len != entry.len) + res = 1; else - ret = !!memcmp(name->name, entry.name, entry.len); + res = memcmp(name->name, entry.name, name->len); } out: kfree(decrypted_name.name); - return ret; + return res == 0; } +#endif /* CONFIG_UNICODE */ -static void f2fs_fname_setup_ci_filename(struct inode *dir, - const struct qstr *iname, - struct fscrypt_str *cf_name) +static inline bool f2fs_match_name(const struct inode *dir, + const struct f2fs_filename *fname, + const u8 *de_name, u32 de_name_len) { - struct f2fs_sb_info *sbi = F2FS_I_SB(dir); - - if (!IS_CASEFOLDED(dir)) { - cf_name->name = NULL; - return; - } + struct fscrypt_name f; - cf_name->name = f2fs_kmalloc(sbi, F2FS_NAME_LEN, GFP_NOFS); - if (!cf_name->name) - return; - - cf_name->len = utf8_casefold(dir->i_sb->s_encoding, - iname, cf_name->name, - F2FS_NAME_LEN); - if ((int)cf_name->len <= 0) { - kvfree(cf_name->name); - cf_name->name = NULL; - } -} -#endif - -static inline bool f2fs_match_name(struct f2fs_dentry_ptr *d, - struct f2fs_dir_entry *de, - struct fscrypt_name *fname, - struct fscrypt_str *cf_str, - unsigned long bit_pos, - f2fs_hash_t namehash) -{ #ifdef CONFIG_UNICODE - struct inode *parent = d->inode; - u8 *name; - int len; -#endif - - if (de->hash_code != namehash) - return false; + if (fname->cf_name.name) { + struct qstr cf = FSTR_TO_QSTR(&fname->cf_name); -#ifdef CONFIG_UNICODE - name = d->filename[bit_pos]; - len = le16_to_cpu(de->name_len); - - if (needs_casefold(parent)) { - if (cf_str->name) { - struct qstr cf = {.name = cf_str->name, - .len = cf_str->len}; - return !f2fs_ci_compare(parent, &cf, name, len, true); - } - return !f2fs_ci_compare(parent, fname->usr_fname, name, len, - false); + return f2fs_match_ci_name(dir, &cf, de_name, de_name_len); } #endif - if (fscrypt_match_name(fname, d->filename[bit_pos], - le16_to_cpu(de->name_len))) - return true; - return false; + f.usr_fname = fname->usr_fname; + f.disk_name = fname->disk_name; +#ifdef CONFIG_FS_ENCRYPTION + f.crypto_buf = fname->crypto_buf; +#endif + return fscrypt_match_name(&f, de_name, de_name_len); } -struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname, - f2fs_hash_t namehash, int *max_slots, - struct f2fs_dentry_ptr *d) +struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, + const struct f2fs_filename *fname, int *max_slots) { struct f2fs_dir_entry *de; - struct fscrypt_str cf_str = { .name = NULL, .len = 0 }; unsigned long bit_pos = 0; int max_len = 0; -#ifdef CONFIG_UNICODE - f2fs_fname_setup_ci_filename(d->inode, fname->usr_fname, &cf_str); -#endif - if (max_slots) *max_slots = 0; while (bit_pos < d->max) { @@ -247,7 +301,9 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname, continue; } - if (f2fs_match_name(d, de, fname, &cf_str, bit_pos, namehash)) + if (de->hash_code == fname->hash && + f2fs_match_name(d->inode, fname, d->filename[bit_pos], + le16_to_cpu(de->name_len))) goto found; if (max_slots && max_len > *max_slots) @@ -261,33 +317,27 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname, found: if (max_slots && max_len > *max_slots) *max_slots = max_len; - -#ifdef CONFIG_UNICODE - kvfree(cf_str.name); -#endif return de; } static struct f2fs_dir_entry *find_in_level(struct inode *dir, unsigned int level, - struct fscrypt_name *fname, + const struct f2fs_filename *fname, struct page **res_page) { - struct 
qstr name = FSTR_TO_QSTR(&fname->disk_name); - int s = GET_DENTRY_SLOTS(name.len); + int s = GET_DENTRY_SLOTS(fname->disk_name.len); unsigned int nbucket, nblock; unsigned int bidx, end_block; struct page *dentry_page; struct f2fs_dir_entry *de = NULL; bool room = false; int max_slots; - f2fs_hash_t namehash = f2fs_dentry_hash(dir, &name, fname); nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level); nblock = bucket_blocks(level); bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level, - le32_to_cpu(namehash) % nbucket); + le32_to_cpu(fname->hash) % nbucket); end_block = bidx + nblock; for (; bidx < end_block; bidx++) { @@ -303,8 +353,8 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, } } - de = find_in_block(dir, dentry_page, fname, namehash, - &max_slots, res_page); + de = find_in_block(dir, dentry_page, fname, &max_slots, + res_page); if (de) break; @@ -313,8 +363,8 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, f2fs_put_page(dentry_page, 0); } - if (!de && room && F2FS_I(dir)->chash != namehash) { - F2FS_I(dir)->chash = namehash; + if (!de && room && F2FS_I(dir)->chash != fname->hash) { + F2FS_I(dir)->chash = fname->hash; F2FS_I(dir)->clevel = level; } @@ -322,7 +372,8 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, } struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, - struct fscrypt_name *fname, struct page **res_page) + const struct f2fs_filename *fname, + struct page **res_page) { unsigned long npages = dir_blocks(dir); struct f2fs_dir_entry *de = NULL; @@ -371,18 +422,10 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, const struct qstr *child, struct page **res_page) { struct f2fs_dir_entry *de = NULL; - struct fscrypt_name fname; + struct f2fs_filename fname; int err; -#ifdef CONFIG_UNICODE - if (sb_has_enc_strict_mode(dir->i_sb) && IS_CASEFOLDED(dir) && - utf8_validate(dir->i_sb->s_encoding, child)) { - *res_page = ERR_PTR(-EINVAL); - return NULL; - } -#endif - - err = fscrypt_setup_filename(dir, child, 1, &fname); + err = f2fs_setup_filename(dir, child, 1, &fname); if (err) { if (err == -ENOENT) *res_page = NULL; @@ -393,7 +436,7 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, de = __f2fs_find_entry(dir, &fname, res_page); - fscrypt_free_filename(&fname); + f2fs_free_filename(&fname); return de; } @@ -434,24 +477,47 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, f2fs_put_page(page, 1); } -static void init_dent_inode(const struct qstr *name, struct page *ipage) +static void init_dent_inode(struct inode *dir, struct inode *inode, + const struct f2fs_filename *fname, + struct page *ipage) { struct f2fs_inode *ri; + if (!fname) /* tmpfile case? */ + return; + f2fs_wait_on_page_writeback(ipage, NODE, true, true); /* copy name info. to this inode page */ ri = F2FS_INODE(ipage); - ri->i_namelen = cpu_to_le32(name->len); - memcpy(ri->i_name, name->name, name->len); + ri->i_namelen = cpu_to_le32(fname->disk_name.len); + memcpy(ri->i_name, fname->disk_name.name, fname->disk_name.len); + if (IS_ENCRYPTED(dir)) { + file_set_enc_name(inode); + /* + * Roll-forward recovery doesn't have encryption keys available, + * so it can't compute the dirhash for encrypted+casefolded + * filenames. Append it to i_name if possible. Else, disable + * roll-forward recovery of the dentry (i.e., make fsync'ing the + * file force a checkpoint) by setting LOST_PINO. 
+ */ + if (IS_CASEFOLDED(dir)) { + if (fname->disk_name.len + sizeof(f2fs_hash_t) <= + F2FS_NAME_LEN) + put_unaligned(fname->hash, (f2fs_hash_t *) + &ri->i_name[fname->disk_name.len]); + else + file_lost_pino(inode); + } + } set_page_dirty(ipage); } void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, struct f2fs_dentry_ptr *d) { - struct qstr dot = QSTR_INIT(".", 1); - struct qstr dotdot = QSTR_INIT("..", 2); + struct fscrypt_str dot = FSTR_INIT(".", 1); + struct fscrypt_str dotdot = FSTR_INIT("..", 2); /* update dirent of "." */ f2fs_update_dentry(inode->i_ino, inode->i_mode, d, &dot, 0, 0); @@ -485,8 +551,7 @@ static int make_empty_dir(struct inode *inode, } struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, - const struct qstr *new_name, const struct qstr *orig_name, - struct page *dpage) + const struct f2fs_filename *fname, struct page *dpage) { struct page *page; int err; @@ -511,7 +576,8 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, if (err) goto put_error; - err = f2fs_init_security(inode, dir, orig_name, page); + err = f2fs_init_security(inode, dir, + fname ? fname->usr_fname : NULL, page); if (err) goto put_error; @@ -526,11 +592,7 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, return page; } - if (new_name) { - init_dent_inode(new_name, page); - if (IS_ENCRYPTED(dir)) - file_set_enc_name(inode); - } + init_dent_inode(dir, inode, fname, page); /* * This file should be checkpointed during fsync. @@ -595,11 +657,11 @@ int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots) } bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, - struct fscrypt_name *fname) + const struct f2fs_filename *fname) { struct f2fs_dentry_ptr d; unsigned int bit_pos; - int slots = GET_DENTRY_SLOTS(fname_len(fname)); + int slots = GET_DENTRY_SLOTS(fname->disk_name.len); make_dentry_ptr_inline(dir, &d, inline_data_addr(dir, ipage)); @@ -609,8 +671,8 @@ bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, } void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, - const struct qstr *name, f2fs_hash_t name_hash, - unsigned int bit_pos) + const struct fscrypt_str *name, f2fs_hash_t name_hash, + unsigned int bit_pos) { struct f2fs_dir_entry *de; int slots = GET_DENTRY_SLOTS(name->len); @@ -630,10 +692,8 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, } } -int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, - const struct qstr *orig_name, - f2fs_hash_t dentry_hash, - struct inode *inode, nid_t ino, umode_t mode) +int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, + struct inode *inode, nid_t ino, umode_t mode) { unsigned int bit_pos; unsigned int level; @@ -647,10 +707,10 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, int slots, err = 0; level = 0; - slots = GET_DENTRY_SLOTS(new_name->len); + slots = GET_DENTRY_SLOTS(fname->disk_name.len); current_depth = F2FS_I(dir)->i_current_depth; - if (F2FS_I(dir)->chash == dentry_hash) { + if (F2FS_I(dir)->chash == fname->hash) { level = F2FS_I(dir)->clevel; F2FS_I(dir)->chash = 0; } @@ -672,7 +732,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, nblock = bucket_blocks(level); bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level, - (le32_to_cpu(dentry_hash) % nbucket)); + (le32_to_cpu(fname->hash) % nbucket)); for (block = bidx; block <= (bidx + nblock - 1); 
block++) { dentry_page = f2fs_get_new_data_page(dir, NULL, block, true); @@ -696,8 +756,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, if (inode) { down_write(&F2FS_I(inode)->i_sem); - page = f2fs_init_inode_metadata(inode, dir, new_name, - orig_name, NULL); + page = f2fs_init_inode_metadata(inode, dir, fname, NULL); if (IS_ERR(page)) { err = PTR_ERR(page); goto fail; @@ -705,7 +764,8 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, } make_dentry_ptr_block(NULL, &d, dentry_blk); - f2fs_update_dentry(ino, mode, &d, new_name, dentry_hash, bit_pos); + f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash, + bit_pos); set_page_dirty(dentry_page); @@ -729,23 +789,15 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, return err; } -int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname, - struct inode *inode, nid_t ino, umode_t mode) +int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname, + struct inode *inode, nid_t ino, umode_t mode) { - struct qstr new_name; - f2fs_hash_t dentry_hash; int err = -EAGAIN; - new_name.name = fname_name(fname); - new_name.len = fname_len(fname); - if (f2fs_has_inline_dentry(dir)) - err = f2fs_add_inline_entry(dir, &new_name, fname, - inode, ino, mode); - dentry_hash = f2fs_dentry_hash(dir, &new_name, fname); + err = f2fs_add_inline_entry(dir, fname, inode, ino, mode); if (err == -EAGAIN) - err = f2fs_add_regular_entry(dir, &new_name, fname->usr_fname, - dentry_hash, inode, ino, mode); + err = f2fs_add_regular_entry(dir, fname, inode, ino, mode); f2fs_update_time(F2FS_I_SB(dir), REQ_TIME); return err; @@ -758,12 +810,12 @@ int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname, int f2fs_do_add_link(struct inode *dir, const struct qstr *name, struct inode *inode, nid_t ino, umode_t mode) { - struct fscrypt_name fname; + struct f2fs_filename fname; struct page *page = NULL; struct f2fs_dir_entry *de = NULL; int err; - err = fscrypt_setup_filename(dir, name, 0, &fname); + err = f2fs_setup_filename(dir, name, 0, &fname); if (err) return err; @@ -786,7 +838,7 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name, } else { err = f2fs_add_dentry(dir, &fname, inode, ino, mode); } - fscrypt_free_filename(&fname); + f2fs_free_filename(&fname); return err; } @@ -796,7 +848,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir) int err = 0; down_write(&F2FS_I(inode)->i_sem); - page = f2fs_init_inode_metadata(inode, dir, NULL, NULL, NULL); + page = f2fs_init_inode_metadata(inode, dir, NULL, NULL); if (IS_ERR(page)) { err = PTR_ERR(page); goto fail; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index f55818a8c263ef0f198880b64c9b79413228f6d5..48350276ac8725034a4dc8e5ddb4a539b98bd8ad 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * fs/f2fs/f2fs.h * @@ -138,10 +138,11 @@ struct f2fs_mount_info { int fsync_mode; /* fsync policy */ int fs_mode; /* fs mode: LFS or ADAPTIVE */ int bggc_mode; /* bggc mode: off, on or sync */ - bool test_dummy_encryption; /* test dummy encryption */ + struct fscrypt_dummy_context dummy_enc_ctx; /* test dummy encryption */ #ifdef CONFIG_FS_ENCRYPTION bool inlinecrypt; /* inline encryption enabled */ #endif + block_t unusable_cap_perc; /* percentage for cap */ block_t unusable_cap; /* Amount of space allowed to be * unusable when disabling checkpoint */ @@ -197,6 +198,7 @@ enum { #define CP_DISCARD 
0x00000010 #define CP_TRIMMED 0x00000020 #define CP_PAUSE 0x00000040 +#define CP_RESIZE 0x00000080 #define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi) #define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */ @@ -431,6 +433,10 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal, #define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15) #define F2FS_IOC_RESIZE_FS _IOW(F2FS_IOCTL_MAGIC, 16, __u64) #define F2FS_IOC_GET_COMPRESS_BLOCKS _IOR(F2FS_IOCTL_MAGIC, 17, __u64) +#define F2FS_IOC_RELEASE_COMPRESS_BLOCKS \ + _IOR(F2FS_IOCTL_MAGIC, 18, __u64) +#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS \ + _IOR(F2FS_IOCTL_MAGIC, 19, __u64) #define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY #define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY @@ -506,6 +512,44 @@ static inline int get_inline_xattr_addrs(struct inode *inode); * For INODE and NODE manager */ /* for directory operations */ + +struct f2fs_filename { + /* + * The filename the user specified. This is NULL for some + * filesystem-internal operations, e.g. converting an inline directory + * to a non-inline one, or roll-forward recovering an encrypted dentry. + */ + const struct qstr *usr_fname; + + /* + * The on-disk filename. For encrypted directories, this is encrypted. + * This may be NULL for lookups in an encrypted dir without the key. + */ + struct fscrypt_str disk_name; + + /* The dirhash of this filename */ + f2fs_hash_t hash; + +#ifdef CONFIG_FS_ENCRYPTION + /* + * For lookups in encrypted directories: either the buffer backing + * disk_name, or a buffer that holds the decoded no-key name. + */ + struct fscrypt_str crypto_buf; +#endif +#ifdef CONFIG_UNICODE + /* + * For casefolded directories: the casefolded name, but it's left NULL + * if the original name is not valid Unicode, if the directory is both + * casefolded and encrypted and its encryption key is unavailable, or if + * the filesystem is doing an internal operation where usr_fname is also + * NULL. In all these cases we fall back to treating the name as an + * opaque byte sequence. 
+ */ + struct fscrypt_str cf_name; +#endif +}; + struct f2fs_dentry_ptr { struct inode *inode; void *bitmap; @@ -1108,6 +1152,8 @@ enum iostat_type { APP_READ_IO, /* app read IOs */ APP_MAPPED_READ_IO, /* app mapped read IOs */ FS_DATA_READ_IO, /* data read IOs */ + FS_GDATA_READ_IO, /* data read IOs from background gc */ + FS_CDATA_READ_IO, /* compressed data read IOs */ FS_NODE_READ_IO, /* node read IOs */ FS_META_READ_IO, /* meta read IOs */ @@ -1271,7 +1317,7 @@ enum fsync_mode { #ifdef CONFIG_FS_ENCRYPTION #define DUMMY_ENCRYPTION_ENABLED(sbi) \ - (unlikely(F2FS_OPTION(sbi).test_dummy_encryption)) + (unlikely(F2FS_OPTION(sbi).dummy_enc_ctx.ctx != NULL)) #else #define DUMMY_ENCRYPTION_ENABLED(sbi) (0) #endif @@ -1426,7 +1472,6 @@ struct f2fs_sb_info { unsigned int segs_per_sec; /* segments per section */ unsigned int secs_per_zone; /* sections per zone */ unsigned int total_sections; /* total section count */ - struct mutex resize_mutex; /* for resize exclusion */ unsigned int total_node_count; /* total node block count */ unsigned int total_valid_node_count; /* valid node block count */ loff_t max_file_blocks; /* max block index of file */ @@ -1520,6 +1565,7 @@ struct f2fs_sb_info { /* to attach REQ_META|REQ_FUA flags */ unsigned int data_io_flag; + unsigned int node_io_flag; /* For sysfs suppport */ struct kobject s_kobj; @@ -2917,12 +2963,12 @@ static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); } -static inline bool is_dot_dotdot(const struct qstr *str) +static inline bool is_dot_dotdot(const u8 *name, size_t len) { - if (str->len == 1 && str->name[0] == '.') + if (len == 1 && name[0] == '.') return true; - if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.') + if (len == 2 && name[0] == '.' 
&& name[1] == '.') return true; return false; @@ -2950,18 +2996,12 @@ static inline bool f2fs_may_extent_tree(struct inode *inode) static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, size_t size, gfp_t flags) { - void *ret; - if (time_to_inject(sbi, FAULT_KMALLOC)) { f2fs_show_injection_info(sbi, FAULT_KMALLOC); return NULL; } - ret = kmalloc(size, flags); - if (ret) - return ret; - - return kvmalloc(size, flags); + return kmalloc(size, flags); } static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, @@ -3102,6 +3142,7 @@ static inline void f2fs_clear_page_private(struct page *page) */ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); void f2fs_truncate_data_blocks(struct dnode_of_data *dn); +int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock); int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock); int f2fs_truncate(struct inode *inode); int f2fs_getattr(const struct path *path, struct kstat *stat, @@ -3141,22 +3182,28 @@ struct dentry *f2fs_get_parent(struct dentry *child); * dir.c */ unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de); -struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname, - f2fs_hash_t namehash, int *max_slots, - struct f2fs_dentry_ptr *d); +int f2fs_init_casefolded_name(const struct inode *dir, + struct f2fs_filename *fname); +int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, + int lookup, struct f2fs_filename *fname); +int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, + struct f2fs_filename *fname); +void f2fs_free_filename(struct f2fs_filename *fname); +struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, + const struct f2fs_filename *fname, int *max_slots); int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, unsigned int start_pos, struct fscrypt_str *fstr); void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, struct f2fs_dentry_ptr *d); struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, - const struct qstr *new_name, - const struct qstr *orig_name, struct page *dpage); + const struct f2fs_filename *fname, struct page *dpage); void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, unsigned int current_depth); int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); void f2fs_drop_nlink(struct inode *dir, struct inode *inode); struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, - struct fscrypt_name *fname, struct page **res_page); + const struct f2fs_filename *fname, + struct page **res_page); struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, const struct qstr *child, struct page **res_page); struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); @@ -3165,14 +3212,13 @@ ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, struct page *page, struct inode *inode); bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, - struct fscrypt_name *fname); + const struct f2fs_filename *fname); void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, - const struct qstr *name, f2fs_hash_t name_hash, + const struct fscrypt_str *name, f2fs_hash_t name_hash, unsigned int bit_pos); -int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, - const struct qstr *orig_name, f2fs_hash_t dentry_hash, +int f2fs_add_regular_entry(struct inode *dir, const struct 
f2fs_filename *fname, struct inode *inode, nid_t ino, umode_t mode); -int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname, +int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname, struct inode *inode, nid_t ino, umode_t mode); int f2fs_do_add_link(struct inode *dir, const struct qstr *name, struct inode *inode, nid_t ino, umode_t mode); @@ -3202,8 +3248,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); /* * hash.c */ -f2fs_hash_t f2fs_dentry_hash(const struct inode *dir, - const struct qstr *name_info, const struct fscrypt_name *fname); +void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname); /* * node.c @@ -3235,6 +3280,7 @@ void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); struct page *f2fs_get_node_page_ra(struct page *parent, int start); int f2fs_move_node_page(struct page *node_page, int gc_type); +int f2fs_flush_inline_data(struct f2fs_sb_info *sbi); int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, struct writeback_control *wbc, bool atomic, unsigned int *seq_id); @@ -3679,7 +3725,7 @@ static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } static inline void __init f2fs_create_root_stats(void) { } static inline void f2fs_destroy_root_stats(void) { } -static inline void update_sit_info(struct f2fs_sb_info *sbi) {} +static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {} #endif extern const struct file_operations f2fs_dir_operations; @@ -3709,11 +3755,11 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry); int f2fs_write_inline_data(struct inode *inode, struct page *page); bool f2fs_recover_inline_data(struct inode *inode, struct page *npage); struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, - struct fscrypt_name *fname, struct page **res_page); + const struct f2fs_filename *fname, + struct page **res_page); int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, struct page *ipage); -int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, - const struct fscrypt_name *fname, +int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, struct inode *inode, nid_t ino, umode_t mode); void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page, struct inode *dir, @@ -3812,8 +3858,11 @@ int f2fs_prepare_compress_overwrite(struct inode *inode, struct page **pagep, pgoff_t index, void **fsdata); bool f2fs_compress_write_end(struct inode *inode, void *fsdata, pgoff_t index, unsigned copied); +int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock); void f2fs_compress_write_end_io(struct bio *bio, struct page *page); bool f2fs_is_compress_backend_ready(struct inode *inode); +int f2fs_init_compress_mempool(void); +void f2fs_destroy_compress_mempool(void); void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity); bool f2fs_cluster_is_empty(struct compress_ctx *cc); bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index); @@ -3847,6 +3896,8 @@ static inline struct page *f2fs_compress_control_page(struct page *page) WARN_ON_ONCE(1); return ERR_PTR(-EINVAL); } +static inline int f2fs_init_compress_mempool(void) { return 0; } +static inline void f2fs_destroy_compress_mempool(void) { } #endif static inline void set_compress_context(struct inode *inode) @@ -3993,6 
+4044,10 @@ static inline void f2fs_i_compr_blocks_update(struct inode *inode, { int diff = F2FS_I(inode)->i_cluster_size - blocks; + /* don't update i_compr_blocks if saved blocks were released */ + if (!add && !F2FS_I(inode)->i_compr_blocks) + return; + if (add) { F2FS_I(inode)->i_compr_blocks += diff; stat_add_compr_blocks(inode, diff); @@ -4034,10 +4089,10 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, return true; if (fsverity_active(inode)) return true; - if (f2fs_is_multi_device(sbi)) - return true; if (f2fs_compressed_file(inode)) return true; + if (f2fs_is_multi_device(sbi)) + return true; /* * for blkzoned device, fallback direct IO to buffered IO, so * all IOs can be serialized by log-structured write. diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 311a36cba3306b137e5941e16cc63d658d2f064b..f45234243c62d5d3eb38ba7e93670627d48b47c4 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -169,9 +169,11 @@ static int get_parent_ino(struct inode *inode, nid_t *pino) { struct dentry *dentry; - inode = igrab(inode); - dentry = d_find_any_alias(inode); - iput(inode); + /* + * Make sure to get the non-deleted alias. The alias associated with + * the open file descriptor being fsync()'ed may be deleted already. + */ + dentry = d_find_alias(inode); if (!dentry) return 0; @@ -572,6 +574,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) bool compressed_cluster = false; int cluster_index = 0, valid_blocks = 0; int cluster_size = F2FS_I(dn->inode)->i_cluster_size; + bool released = !F2FS_I(dn->inode)->i_compr_blocks; if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode)) base = get_extra_isize(dn->inode); @@ -610,7 +613,9 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN); f2fs_invalidate_blocks(sbi, blkaddr); - nr_free++; + + if (!released || blkaddr != COMPRESS_ADDR) + nr_free++; } if (compressed_cluster) @@ -658,9 +663,6 @@ static int truncate_partial_data_page(struct inode *inode, u64 from, return 0; } - if (f2fs_compressed_file(inode)) - return 0; - page = f2fs_get_lock_data_page(inode, index, true); if (IS_ERR(page)) return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page); @@ -676,7 +678,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from, return 0; } -static int do_truncate_blocks(struct inode *inode, u64 from, bool lock) +int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct dnode_of_data dn; @@ -744,23 +746,28 @@ static int do_truncate_blocks(struct inode *inode, u64 from, bool lock) int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock) { u64 free_from = from; + int err; +#ifdef CONFIG_F2FS_FS_COMPRESSION /* * for compressed file, only support cluster size * aligned truncation. 
*/ - if (f2fs_compressed_file(inode)) { - size_t cluster_shift = PAGE_SHIFT + - F2FS_I(inode)->i_log_cluster_size; - size_t cluster_mask = (1 << cluster_shift) - 1; + if (f2fs_compressed_file(inode)) + free_from = round_up(from, + F2FS_I(inode)->i_cluster_size << PAGE_SHIFT); +#endif - free_from = from >> cluster_shift; - if (from & cluster_mask) - free_from++; - free_from <<= cluster_shift; - } + err = f2fs_do_truncate_blocks(inode, free_from, lock); + if (err) + return err; - return do_truncate_blocks(inode, free_from, lock); +#ifdef CONFIG_F2FS_FS_COMPRESSION + if (from != free_from) + err = f2fs_truncate_partial_cluster(inode, from, lock); +#endif + + return err; } int f2fs_truncate(struct inode *inode) @@ -986,9 +993,7 @@ const struct inode_operations f2fs_file_inode_operations = { .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, .set_acl = f2fs_set_acl, -#ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, -#endif .fiemap = f2fs_fiemap, }; @@ -1667,7 +1672,11 @@ static int expand_inode_data(struct inode *inode, loff_t offset, down_write(&sbi->pin_sem); map.m_seg_type = CURSEG_COLD_DATA_PINNED; + + f2fs_lock_op(sbi); f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA); + f2fs_unlock_op(sbi); + err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO); up_write(&sbi->pin_sem); @@ -2237,8 +2246,15 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) if (in != F2FS_GOING_DOWN_FULLSYNC) { ret = mnt_want_write_file(filp); - if (ret) + if (ret) { + if (ret == -EROFS) { + ret = 0; + f2fs_stop_checkpoint(sbi, false); + set_sbi_flag(sbi, SBI_IS_SHUTDOWN); + trace_f2fs_shutdown(sbi, in, ret); + } return ret; + } } switch (in) { @@ -3319,7 +3335,6 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg) { struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp)); __u64 block_count; - int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -3331,9 +3346,7 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg) sizeof(block_count))) return -EFAULT; - ret = f2fs_resize_fs(sbi, block_count); - - return ret; + return f2fs_resize_fs(sbi, block_count); } static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg) @@ -3375,6 +3388,326 @@ static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg) return put_user(blocks, (u64 __user *)arg); } +static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); + unsigned int released_blocks = 0; + int cluster_size = F2FS_I(dn->inode)->i_cluster_size; + block_t blkaddr; + int i; + + for (i = 0; i < count; i++) { + blkaddr = data_blkaddr(dn->inode, dn->node_page, + dn->ofs_in_node + i); + + if (!__is_valid_data_blkaddr(blkaddr)) + continue; + if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, + DATA_GENERIC_ENHANCE))) + return -EFSCORRUPTED; + } + + while (count) { + int compr_blocks = 0; + + for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) { + blkaddr = f2fs_data_blkaddr(dn); + + if (i == 0) { + if (blkaddr == COMPRESS_ADDR) + continue; + dn->ofs_in_node += cluster_size; + goto next; + } + + if (__is_valid_data_blkaddr(blkaddr)) + compr_blocks++; + + if (blkaddr != NEW_ADDR) + continue; + + dn->data_blkaddr = NULL_ADDR; + f2fs_set_data_blkaddr(dn); + } + + f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false); + dec_valid_block_count(sbi, dn->inode, + cluster_size - compr_blocks); + + released_blocks += cluster_size - compr_blocks; +next: + count -= cluster_size; + } + + return released_blocks; +} + +static int 
f2fs_release_compress_blocks(struct file *filp, unsigned long arg) +{ + struct inode *inode = file_inode(filp); + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + pgoff_t page_idx = 0, last_idx; + unsigned int released_blocks = 0; + int ret; + int writecount; + + if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) + return -EOPNOTSUPP; + + if (!f2fs_compressed_file(inode)) + return -EINVAL; + + if (f2fs_readonly(sbi->sb)) + return -EROFS; + + ret = mnt_want_write_file(filp); + if (ret) + return ret; + + f2fs_balance_fs(F2FS_I_SB(inode), true); + + inode_lock(inode); + + writecount = atomic_read(&inode->i_writecount); + if ((filp->f_mode & FMODE_WRITE && writecount != 1) || writecount) { + ret = -EBUSY; + goto out; + } + + if (IS_IMMUTABLE(inode)) { + ret = -EINVAL; + goto out; + } + + ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); + if (ret) + goto out; + + if (!F2FS_I(inode)->i_compr_blocks) + goto out; + + F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL; + f2fs_set_inode_flags(inode); + inode->i_ctime = current_time(inode); + f2fs_mark_inode_dirty_sync(inode, true); + + down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + down_write(&F2FS_I(inode)->i_mmap_sem); + + last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); + + while (page_idx < last_idx) { + struct dnode_of_data dn; + pgoff_t end_offset, count; + + set_new_dnode(&dn, inode, NULL, NULL, 0); + ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE); + if (ret) { + if (ret == -ENOENT) { + page_idx = f2fs_get_next_page_offset(&dn, + page_idx); + ret = 0; + continue; + } + break; + } + + end_offset = ADDRS_PER_PAGE(dn.node_page, inode); + count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); + count = round_up(count, F2FS_I(inode)->i_cluster_size); + + ret = release_compress_blocks(&dn, count); + + f2fs_put_dnode(&dn); + + if (ret < 0) + break; + + page_idx += count; + released_blocks += ret; + } + + up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + up_write(&F2FS_I(inode)->i_mmap_sem); +out: + inode_unlock(inode); + + mnt_drop_write_file(filp); + + if (ret >= 0) { + ret = put_user(released_blocks, (u64 __user *)arg); + } else if (released_blocks && F2FS_I(inode)->i_compr_blocks) { + set_sbi_flag(sbi, SBI_NEED_FSCK); + f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx " + "iblocks=%llu, released=%u, compr_blocks=%llu, " + "run fsck to fix.", + __func__, inode->i_ino, (u64)inode->i_blocks, + released_blocks, + F2FS_I(inode)->i_compr_blocks); + } + + return ret; +} + +static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); + unsigned int reserved_blocks = 0; + int cluster_size = F2FS_I(dn->inode)->i_cluster_size; + block_t blkaddr; + int i; + + for (i = 0; i < count; i++) { + blkaddr = data_blkaddr(dn->inode, dn->node_page, + dn->ofs_in_node + i); + + if (!__is_valid_data_blkaddr(blkaddr)) + continue; + if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, + DATA_GENERIC_ENHANCE))) + return -EFSCORRUPTED; + } + + while (count) { + int compr_blocks = 0; + blkcnt_t reserved; + int ret; + + for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) { + blkaddr = f2fs_data_blkaddr(dn); + + if (i == 0) { + if (blkaddr == COMPRESS_ADDR) + continue; + dn->ofs_in_node += cluster_size; + goto next; + } + + if (__is_valid_data_blkaddr(blkaddr)) { + compr_blocks++; + continue; + } + + dn->data_blkaddr = NEW_ADDR; + f2fs_set_data_blkaddr(dn); + } + + reserved = cluster_size - compr_blocks; + ret = inc_valid_block_count(sbi, dn->inode, &reserved); + if 
(ret) + return ret; + + if (reserved != cluster_size - compr_blocks) + return -ENOSPC; + + f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true); + + reserved_blocks += reserved; +next: + count -= cluster_size; + } + + return reserved_blocks; +} + +static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg) +{ + struct inode *inode = file_inode(filp); + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + pgoff_t page_idx = 0, last_idx; + unsigned int reserved_blocks = 0; + int ret; + + if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) + return -EOPNOTSUPP; + + if (!f2fs_compressed_file(inode)) + return -EINVAL; + + if (f2fs_readonly(sbi->sb)) + return -EROFS; + + ret = mnt_want_write_file(filp); + if (ret) + return ret; + + if (F2FS_I(inode)->i_compr_blocks) + goto out; + + f2fs_balance_fs(F2FS_I_SB(inode), true); + + inode_lock(inode); + + if (!IS_IMMUTABLE(inode)) { + ret = -EINVAL; + goto unlock_inode; + } + + down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + down_write(&F2FS_I(inode)->i_mmap_sem); + + last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); + + while (page_idx < last_idx) { + struct dnode_of_data dn; + pgoff_t end_offset, count; + + set_new_dnode(&dn, inode, NULL, NULL, 0); + ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE); + if (ret) { + if (ret == -ENOENT) { + page_idx = f2fs_get_next_page_offset(&dn, + page_idx); + ret = 0; + continue; + } + break; + } + + end_offset = ADDRS_PER_PAGE(dn.node_page, inode); + count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); + count = round_up(count, F2FS_I(inode)->i_cluster_size); + + ret = reserve_compress_blocks(&dn, count); + + f2fs_put_dnode(&dn); + + if (ret < 0) + break; + + page_idx += count; + reserved_blocks += ret; + } + + up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + up_write(&F2FS_I(inode)->i_mmap_sem); + + if (ret >= 0) { + F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL; + f2fs_set_inode_flags(inode); + inode->i_ctime = current_time(inode); + f2fs_mark_inode_dirty_sync(inode, true); + } +unlock_inode: + inode_unlock(inode); +out: + mnt_drop_write_file(filp); + + if (ret >= 0) { + ret = put_user(reserved_blocks, (u64 __user *)arg); + } else if (reserved_blocks && F2FS_I(inode)->i_compr_blocks) { + set_sbi_flag(sbi, SBI_NEED_FSCK); + f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx " + "iblocks=%llu, reserved=%u, compr_blocks=%llu, " + "run fsck to fix.", + __func__, inode->i_ino, (u64)inode->i_blocks, + reserved_blocks, + F2FS_I(inode)->i_compr_blocks); + } + + return ret; +} + long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) @@ -3453,6 +3786,10 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return f2fs_ioc_measure_verity(filp, arg); case F2FS_IOC_GET_COMPRESS_BLOCKS: return f2fs_get_compress_blocks(filp, arg); + case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: + return f2fs_release_compress_blocks(filp, arg); + case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: + return f2fs_reserve_compress_blocks(filp, arg); default: return -ENOTTY; } @@ -3617,6 +3954,8 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case FS_IOC_ENABLE_VERITY: case FS_IOC_MEASURE_VERITY: case F2FS_IOC_GET_COMPRESS_BLOCKS: + case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: + case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: break; default: return -ENOIOCTLCMD; diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 0c03f7d05fa6f6cc6c4e4eeccd103ca98107bae3..4c0fd04fa98dce0ebbe46720ec7fcccacd2eaff6 100644 --- 
a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "f2fs.h" #include "node.h" @@ -739,6 +740,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index) f2fs_put_page(page, 1); f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); + f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE); return 0; put_encrypted_page: @@ -846,6 +848,7 @@ static int move_data_block(struct inode *inode, block_t bidx, } f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); + f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE); lock_page(mpage); if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || @@ -1406,12 +1409,29 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi) GET_SEGNO(sbi, FDEV(0).end_blk) + 1; } -static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start, - unsigned int end) +static int free_segment_range(struct f2fs_sb_info *sbi, + unsigned int secs, bool gc_only) { - int type; - unsigned int segno, next_inuse; + unsigned int segno, next_inuse, start, end; + struct cp_control cpc = { CP_RESIZE, 0, 0, 0 }; + int gc_mode, gc_type; int err = 0; + int type; + + /* Force block allocation for GC */ + MAIN_SECS(sbi) -= secs; + start = MAIN_SECS(sbi) * sbi->segs_per_sec; + end = MAIN_SEGS(sbi) - 1; + + mutex_lock(&DIRTY_I(sbi)->seglist_lock); + for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++) + if (SIT_I(sbi)->last_victim[gc_mode] >= start) + SIT_I(sbi)->last_victim[gc_mode] = 0; + + for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++) + if (sbi->next_victim_seg[gc_type] >= start) + sbi->next_victim_seg[gc_type] = NULL_SEGNO; + mutex_unlock(&DIRTY_I(sbi)->seglist_lock); /* Move out cursegs from the target range */ for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++) @@ -1424,18 +1444,24 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start, .iroot = RADIX_TREE_INIT(GFP_NOFS), }; - down_write(&sbi->gc_lock); do_garbage_collect(sbi, segno, &gc_list, FG_GC); - up_write(&sbi->gc_lock); put_gc_inode(&gc_list); - if (get_valid_blocks(sbi, segno, true)) - return -EAGAIN; + if (!gc_only && get_valid_blocks(sbi, segno, true)) { + err = -EAGAIN; + goto out; + } + if (fatal_signal_pending(current)) { + err = -ERESTARTSYS; + goto out; + } } + if (gc_only) + goto out; - err = f2fs_sync_fs(sbi->sb, 1); + err = f2fs_write_checkpoint(sbi, &cpc); if (err) - return err; + goto out; next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start); if (next_inuse <= end) { @@ -1443,6 +1469,8 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start, next_inuse); f2fs_bug_on(sbi, 1); } +out: + MAIN_SECS(sbi) += secs; return err; } @@ -1488,6 +1516,7 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs; MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs; + MAIN_SECS(sbi) += secs; FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs; FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs; F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks); @@ -1509,8 +1538,8 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) { __u64 old_block_count, shrunk_blocks; + struct cp_control cpc = { CP_RESIZE, 0, 0, 0 }; unsigned int secs; - int gc_mode, gc_type; int err = 0; __u32 rem; @@ -1545,10 +1574,27 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) return -EINVAL; } - 
freeze_bdev(sbi->sb->s_bdev); - shrunk_blocks = old_block_count - block_count; secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi)); + + /* stop other GC */ + if (!down_write_trylock(&sbi->gc_lock)) + return -EAGAIN; + + /* stop CP to protect MAIN_SEC in free_segment_range */ + f2fs_lock_op(sbi); + err = free_segment_range(sbi, secs, true); + f2fs_unlock_op(sbi); + up_write(&sbi->gc_lock); + if (err) + return err; + + set_sbi_flag(sbi, SBI_IS_RESIZEFS); + + freeze_super(sbi->sb); + down_write(&sbi->gc_lock); + mutex_lock(&sbi->cp_mutex); + spin_lock(&sbi->stat_lock); if (shrunk_blocks + valid_user_blocks(sbi) + sbi->current_reserved_blocks + sbi->unusable_block_count + @@ -1557,69 +1603,44 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) else sbi->user_block_count -= shrunk_blocks; spin_unlock(&sbi->stat_lock); - if (err) { - thaw_bdev(sbi->sb->s_bdev, sbi->sb); - return err; - } - - mutex_lock(&sbi->resize_mutex); - set_sbi_flag(sbi, SBI_IS_RESIZEFS); - - mutex_lock(&DIRTY_I(sbi)->seglist_lock); - - MAIN_SECS(sbi) -= secs; - - for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++) - if (SIT_I(sbi)->last_victim[gc_mode] >= - MAIN_SECS(sbi) * sbi->segs_per_sec) - SIT_I(sbi)->last_victim[gc_mode] = 0; - - for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++) - if (sbi->next_victim_seg[gc_type] >= - MAIN_SECS(sbi) * sbi->segs_per_sec) - sbi->next_victim_seg[gc_type] = NULL_SEGNO; - - mutex_unlock(&DIRTY_I(sbi)->seglist_lock); + if (err) + goto out_err; - err = free_segment_range(sbi, MAIN_SECS(sbi) * sbi->segs_per_sec, - MAIN_SEGS(sbi) - 1); + err = free_segment_range(sbi, secs, false); if (err) - goto out; + goto recover_out; update_sb_metadata(sbi, -secs); err = f2fs_commit_super(sbi, false); if (err) { update_sb_metadata(sbi, secs); - goto out; + goto recover_out; } - mutex_lock(&sbi->cp_mutex); update_fs_metadata(sbi, -secs); clear_sbi_flag(sbi, SBI_IS_RESIZEFS); set_sbi_flag(sbi, SBI_IS_DIRTY); - mutex_unlock(&sbi->cp_mutex); - err = f2fs_sync_fs(sbi->sb, 1); + err = f2fs_write_checkpoint(sbi, &cpc); if (err) { - mutex_lock(&sbi->cp_mutex); update_fs_metadata(sbi, secs); - mutex_unlock(&sbi->cp_mutex); update_sb_metadata(sbi, secs); f2fs_commit_super(sbi, false); } -out: +recover_out: if (err) { set_sbi_flag(sbi, SBI_NEED_FSCK); f2fs_err(sbi, "resize_fs failed, should run fsck to repair!"); - MAIN_SECS(sbi) += secs; spin_lock(&sbi->stat_lock); sbi->user_block_count += shrunk_blocks; spin_unlock(&sbi->stat_lock); } +out_err: + mutex_unlock(&sbi->cp_mutex); + up_write(&sbi->gc_lock); + thaw_super(sbi->sb); clear_sbi_flag(sbi, SBI_IS_RESIZEFS); - mutex_unlock(&sbi->resize_mutex); - thaw_bdev(sbi->sb->s_bdev, sbi->sb); return err; } diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h index bbac9d3787bd35e4bbdfb1a2ea65853148d1e6a2..db3c61046aa425376b3a0b2a821fc9d26b234025 100644 --- a/fs/f2fs/gc.h +++ b/fs/f2fs/gc.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * fs/f2fs/gc.h * diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c index 8f7ee4362312b3398d79f1a36ad459f8134ee1aa..f9b706495d1d62af49adced03ed0861788ba86e2 100644 --- a/fs/f2fs/hash.c +++ b/fs/f2fs/hash.c @@ -68,28 +68,9 @@ static void str2hashbuf(const unsigned char *msg, size_t len, *buf++ = pad; } -static f2fs_hash_t __f2fs_dentry_hash(const struct inode *dir, - const struct qstr *name_info, - const struct fscrypt_name *fname) +static u32 TEA_hash_name(const u8 *p, size_t len) { - __u32 hash; - f2fs_hash_t f2fs_hash; - const unsigned char *p; __u32 in[8], buf[4]; - const unsigned char 
*name = name_info->name; - size_t len = name_info->len; - - /* encrypted bigname case */ - if (fname && fname->is_ciphertext_name) - return cpu_to_le32(fname->hash); - - if (is_dot_dotdot(name_info)) - return 0; - - if (IS_CASEFOLDED(dir) && IS_ENCRYPTED(dir)) { - f2fs_hash = cpu_to_le32(fscrypt_fname_siphash(dir, name_info)); - return f2fs_hash; - } /* Initialize the default seed for the hash checksum functions */ buf[0] = 0x67452301; @@ -97,7 +78,6 @@ static f2fs_hash_t __f2fs_dentry_hash(const struct inode *dir, buf[2] = 0x98badcfe; buf[3] = 0x10325476; - p = name; while (1) { str2hashbuf(p, len, in, 4); TEA_transform(buf, in); @@ -106,44 +86,52 @@ static f2fs_hash_t __f2fs_dentry_hash(const struct inode *dir, break; len -= 16; } - hash = buf[0]; - f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT); - return f2fs_hash; + return buf[0] & ~F2FS_HASH_COL_BIT; } -f2fs_hash_t f2fs_dentry_hash(const struct inode *dir, - const struct qstr *name_info, const struct fscrypt_name *fname) +/* + * Compute @fname->hash. For all directories, @fname->disk_name must be set. + * For casefolded directories, @fname->usr_fname must be set, and also + * @fname->cf_name if the filename is valid Unicode. + */ +void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname) { -#ifdef CONFIG_UNICODE - struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); - const struct unicode_map *um = dir->i_sb->s_encoding; - int r, dlen; - unsigned char *buff; - struct qstr folded; - const struct qstr *name = fname ? fname->usr_fname : name_info; - - if (!name_info->len || !IS_CASEFOLDED(dir)) - goto opaque_seq; - - if (IS_ENCRYPTED(dir) && !fscrypt_has_encryption_key(dir)) - goto opaque_seq; - - buff = f2fs_kzalloc(sbi, sizeof(char) * PATH_MAX, GFP_KERNEL); - if (!buff) - return -ENOMEM; - dlen = utf8_casefold(um, name, buff, PATH_MAX); - if (dlen < 0) { - kvfree(buff); - goto opaque_seq; + const u8 *name = fname->disk_name.name; + size_t len = fname->disk_name.len; + + WARN_ON_ONCE(!name); + + if (is_dot_dotdot(name, len)) { + fname->hash = 0; + return; } - folded.name = buff; - folded.len = dlen; - r = __f2fs_dentry_hash(dir, &folded, fname); - kvfree(buff); - return r; +#ifdef CONFIG_UNICODE + if (IS_CASEFOLDED(dir)) { + /* + * If the casefolded name is provided, hash it instead of the + * on-disk name. If the casefolded name is *not* provided, that + * should only be because the name wasn't valid Unicode, so fall + * back to treating the name as an opaque byte sequence. Note + * that to handle encrypted directories, the fallback must use + * usr_fname (plaintext) rather than disk_name (ciphertext). 
+ */ + WARN_ON_ONCE(!fname->usr_fname->name); + if (fname->cf_name.name) { + name = fname->cf_name.name; + len = fname->cf_name.len; + } else { + name = fname->usr_fname->name; + len = fname->usr_fname->len; + } + if (IS_ENCRYPTED(dir)) { + struct qstr tmp = QSTR_INIT(name, len); -opaque_seq: + fname->hash = + cpu_to_le32(fscrypt_fname_siphash(dir, &tmp)); + return; + } + } #endif - return __f2fs_dentry_hash(dir, name_info, fname); + fname->hash = cpu_to_le32(TEA_hash_name(name, len)); } diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index b01e0ac34f8c84d40fcc8084fa94a2de02eecea8..5676c03067a7f19a065cb15388e673757c244b9e 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -322,15 +322,14 @@ bool f2fs_recover_inline_data(struct inode *inode, struct page *npage) } struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, - struct fscrypt_name *fname, struct page **res_page) + const struct f2fs_filename *fname, + struct page **res_page) { struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); - struct qstr name = FSTR_TO_QSTR(&fname->disk_name); struct f2fs_dir_entry *de; struct f2fs_dentry_ptr d; struct page *ipage; void *inline_dentry; - f2fs_hash_t namehash; ipage = f2fs_get_node_page(sbi, dir->i_ino); if (IS_ERR(ipage)) { @@ -338,12 +337,10 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, return NULL; } - namehash = f2fs_dentry_hash(dir, &name, fname); - inline_dentry = inline_data_addr(dir, ipage); make_dentry_ptr_inline(dir, &d, inline_dentry); - de = f2fs_find_target_dentry(fname, namehash, NULL, &d); + de = f2fs_find_target_dentry(&d, fname, NULL); unlock_page(ipage); if (de) *res_page = ipage; @@ -460,7 +457,7 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry) while (bit_pos < d.max) { struct f2fs_dir_entry *de; - struct qstr new_name; + struct f2fs_filename fname; nid_t ino; umode_t fake_mode; @@ -476,14 +473,19 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry) continue; } - new_name.name = d.filename[bit_pos]; - new_name.len = le16_to_cpu(de->name_len); + /* + * We only need the disk_name and hash to move the dentry. + * We don't need the original or casefolded filenames. 
+ */ + memset(&fname, 0, sizeof(fname)); + fname.disk_name.name = d.filename[bit_pos]; + fname.disk_name.len = le16_to_cpu(de->name_len); + fname.hash = de->hash_code; ino = le32_to_cpu(de->ino); fake_mode = f2fs_get_de_type(de) << S_SHIFT; - err = f2fs_add_regular_entry(dir, &new_name, NULL, - de->hash_code, NULL, ino, fake_mode); + err = f2fs_add_regular_entry(dir, &fname, NULL, ino, fake_mode); if (err) goto punch_dentry_pages; @@ -560,7 +562,7 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct page *ipage; - struct fscrypt_name fname; + struct f2fs_filename fname; void *inline_dentry = NULL; int err = 0; @@ -569,19 +571,19 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry) f2fs_lock_op(sbi); - err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); + err = f2fs_setup_filename(dir, &dentry->d_name, 0, &fname); if (err) goto out; ipage = f2fs_get_node_page(sbi, dir->i_ino); if (IS_ERR(ipage)) { err = PTR_ERR(ipage); - goto out; + goto out_fname; } if (f2fs_has_enough_room(dir, ipage, &fname)) { f2fs_put_page(ipage, 1); - goto out; + goto out_fname; } inline_dentry = inline_data_addr(dir, ipage); @@ -589,24 +591,23 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry) err = do_convert_inline_dir(dir, ipage, inline_dentry); if (!err) f2fs_put_page(ipage, 1); +out_fname: + f2fs_free_filename(&fname); out: f2fs_unlock_op(sbi); return err; } -int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, - const struct fscrypt_name *fname, - struct inode *inode, nid_t ino, umode_t mode) +int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, + struct inode *inode, nid_t ino, umode_t mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct page *ipage; unsigned int bit_pos; - f2fs_hash_t name_hash; void *inline_dentry = NULL; struct f2fs_dentry_ptr d; - int slots = GET_DENTRY_SLOTS(new_name->len); + int slots = GET_DENTRY_SLOTS(fname->disk_name.len); struct page *page = NULL; - const struct qstr *orig_name = fname->usr_fname; int err = 0; ipage = f2fs_get_node_page(sbi, dir->i_ino); @@ -627,8 +628,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, if (inode) { down_write(&F2FS_I(inode)->i_sem); - page = f2fs_init_inode_metadata(inode, dir, new_name, - orig_name, ipage); + page = f2fs_init_inode_metadata(inode, dir, fname, ipage); if (IS_ERR(page)) { err = PTR_ERR(page); goto fail; @@ -637,8 +637,8 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name, f2fs_wait_on_page_writeback(ipage, NODE, true, true); - name_hash = f2fs_dentry_hash(dir, new_name, fname); - f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos); + f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash, + bit_pos); set_page_dirty(ipage); diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index a8959c64bf3a5efe3ff71dbc806c2f08d29c20f0..21145281983fa3b60e421ebd12697ca4cbb93e04 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -482,7 +482,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, nid_t ino = -1; int err = 0; unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir)); - struct fscrypt_name fname; + struct f2fs_filename fname; trace_f2fs_lookup_start(dir, dentry, flags); @@ -491,20 +491,21 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, goto out; } - err = fscrypt_prepare_lookup(dir, dentry, &fname); + err = 
f2fs_prepare_lookup(dir, dentry, &fname); generic_set_encrypted_ci_d_ops(dir, dentry); if (err == -ENOENT) goto out_splice; if (err) goto out; de = __f2fs_find_entry(dir, &fname, &page); - fscrypt_free_filename(&fname); + f2fs_free_filename(&fname); if (!de) { if (IS_ERR(page)) { err = PTR_ERR(page); goto out; } + err = -ENOENT; goto out_splice; } @@ -550,7 +551,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, #endif new = d_splice_alias(inode, dentry); err = PTR_ERR_OR_ZERO(new); - trace_f2fs_lookup_end(dir, dentry, ino, err); + trace_f2fs_lookup_end(dir, dentry, ino, !new ? -ENOENT : err); return new; out_iput: iput(inode); @@ -565,7 +566,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) struct inode *inode = d_inode(dentry); struct f2fs_dir_entry *de; struct page *page; - int err = -ENOENT; + int err; trace_f2fs_unlink_enter(dir, dentry); @@ -1288,9 +1289,7 @@ const struct inode_operations f2fs_encrypted_symlink_inode_operations = { .get_link = f2fs_encrypted_get_link, .getattr = f2fs_getattr, .setattr = f2fs_setattr, -#ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, -#endif }; const struct inode_operations f2fs_dir_inode_operations = { @@ -1308,9 +1307,7 @@ const struct inode_operations f2fs_dir_inode_operations = { .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, .set_acl = f2fs_set_acl, -#ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, -#endif .fiemap = f2fs_fiemap, }; @@ -1318,9 +1315,7 @@ const struct inode_operations f2fs_symlink_inode_operations = { .get_link = f2fs_get_link, .getattr = f2fs_getattr, .setattr = f2fs_setattr, -#ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, -#endif }; const struct inode_operations f2fs_special_inode_operations = { @@ -1328,7 +1323,5 @@ const struct inode_operations f2fs_special_inode_operations = { .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, .set_acl = f2fs_set_acl, -#ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, -#endif }; diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 1e80fa16fb01be745dd915968c93cd073aca532d..006c4f9bad3493775ecdc06e42a6d083441f73a9 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1518,8 +1518,15 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted, trace_f2fs_writepage(page, NODE); - if (unlikely(f2fs_cp_error(sbi))) + if (unlikely(f2fs_cp_error(sbi))) { + if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) { + ClearPageUptodate(page); + dec_page_count(sbi, F2FS_DIRTY_NODES); + unlock_page(page); + return 0; + } goto redirty_out; + } if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto redirty_out; @@ -1805,6 +1812,53 @@ static bool flush_dirty_inode(struct page *page) return true; } +int f2fs_flush_inline_data(struct f2fs_sb_info *sbi) +{ + pgoff_t index = 0; + struct pagevec pvec; + int nr_pages; + int ret = 0; + + pagevec_init(&pvec, 0); + + while ((nr_pages = pagevec_lookup_tag(&pvec, + NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) { + int i; + + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + + if (!IS_DNODE(page)) + continue; + + lock_page(page); + + if (unlikely(page->mapping != NODE_MAPPING(sbi))) { +continue_unlock: + unlock_page(page); + continue; + } + + if (!PageDirty(page)) { + /* someone wrote it for us */ + goto continue_unlock; + } + + /* flush inline_data, if it's async context. 
*/ + if (is_inline_node(page)) { + clear_inline_node(page); + unlock_page(page); + flush_inline_data(sbi, ino_of_node(page)); + continue; + } + unlock_page(page); + } + pagevec_release(&pvec); + cond_resched(); + } + return ret; +} + int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc, bool do_balance, enum iostat_type io_type) @@ -1868,8 +1922,8 @@ int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, goto continue_unlock; } - /* flush inline_data */ - if (is_inline_node(page)) { + /* flush inline_data, if it's async context. */ + if (do_balance && is_inline_node(page)) { clear_inline_node(page); unlock_page(page); flush_inline_data(sbi, ino_of_node(page)); @@ -2481,7 +2535,6 @@ void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) { struct f2fs_nm_info *nm_i = NM_I(sbi); - struct free_nid *i, *next; int nr = nr_shrink; if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) @@ -2490,17 +2543,23 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) if (!mutex_trylock(&nm_i->build_lock)) return 0; - spin_lock(&nm_i->nid_list_lock); - list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) { - if (nr_shrink <= 0 || - nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) - break; + while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) { + struct free_nid *i, *next; + unsigned int batch = SHRINK_NID_BATCH_SIZE; - __remove_free_nid(sbi, i, FREE_NID); - kmem_cache_free(free_nid_slab, i); - nr_shrink--; + spin_lock(&nm_i->nid_list_lock); + list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) { + if (!nr_shrink || !batch || + nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) + break; + __remove_free_nid(sbi, i, FREE_NID); + kmem_cache_free(free_nid_slab, i); + nr_shrink--; + batch--; + } + spin_unlock(&nm_i->nid_list_lock); } - spin_unlock(&nm_i->nid_list_lock); + mutex_unlock(&nm_i->build_lock); return nr - nr_shrink; @@ -2925,7 +2984,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) return 0; nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8); - nm_i->nat_bits = f2fs_kzalloc(sbi, + nm_i->nat_bits = f2fs_kvzalloc(sbi, nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL); if (!nm_i->nat_bits) return -ENOMEM; @@ -3058,9 +3117,9 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi) int i; nm_i->free_nid_bitmap = - f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *), - nm_i->nat_blocks), - GFP_KERNEL); + f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *), + nm_i->nat_blocks), + GFP_KERNEL); if (!nm_i->free_nid_bitmap) return -ENOMEM; diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index e05af5df56485eff71c95f58b5d3f877cbcb8e53..69e5859e993cf754f69971f2916eb0e13ba48d9c 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * fs/f2fs/node.h * @@ -15,6 +15,9 @@ #define FREE_NID_PAGES 8 #define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES) +/* size of free nid batch when shrinking */ +#define SHRINK_NID_BATCH_SIZE 8 + #define DEF_RA_NID_PAGES 0 /* # of nid pages to be readaheaded */ /* maximum readahead size for node during getting data blocks */ diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 5288a6f71ca24b3353eb2ed63e594ab614d133c3..10e302c0bb099b76ce4b954e6b3fbea90731be09 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -5,6 +5,7 @@ * Copyright (c) 2012 Samsung Electronics Co., Ltd. 
* http://www.samsung.com/ */ +#include #include #include #include "f2fs.h" @@ -107,13 +108,60 @@ static void del_fsync_inode(struct fsync_inode_entry *entry, int drop) kmem_cache_free(fsync_entry_slab, entry); } +static int init_recovered_filename(const struct inode *dir, + struct f2fs_inode *raw_inode, + struct f2fs_filename *fname, + struct qstr *usr_fname) +{ + int err; + + memset(fname, 0, sizeof(*fname)); + fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen); + fname->disk_name.name = raw_inode->i_name; + + if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN)) + return -ENAMETOOLONG; + + if (!IS_ENCRYPTED(dir)) { + usr_fname->name = fname->disk_name.name; + usr_fname->len = fname->disk_name.len; + fname->usr_fname = usr_fname; + } + + /* Compute the hash of the filename */ + if (IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)) { + /* + * In this case the hash isn't computable without the key, so it + * was saved on-disk. + */ + if (fname->disk_name.len + sizeof(f2fs_hash_t) > F2FS_NAME_LEN) + return -EINVAL; + fname->hash = get_unaligned((f2fs_hash_t *) + &raw_inode->i_name[fname->disk_name.len]); + } else if (IS_CASEFOLDED(dir)) { + err = f2fs_init_casefolded_name(dir, fname); + if (err) + return err; + f2fs_hash_filename(dir, fname); +#ifdef CONFIG_UNICODE + /* Case-sensitive match is fine for recovery */ + kfree(fname->cf_name.name); + fname->cf_name.name = NULL; +#endif + } else { + f2fs_hash_filename(dir, fname); + } + return 0; +} + static int recover_dentry(struct inode *inode, struct page *ipage, struct list_head *dir_list) { struct f2fs_inode *raw_inode = F2FS_INODE(ipage); nid_t pino = le32_to_cpu(raw_inode->i_pino); struct f2fs_dir_entry *de; - struct fscrypt_name fname; + struct f2fs_filename fname; + struct qstr usr_fname; struct page *page; struct inode *dir, *einode; struct fsync_inode_entry *entry; @@ -132,16 +180,9 @@ static int recover_dentry(struct inode *inode, struct page *ipage, } dir = entry->inode; - - memset(&fname, 0, sizeof(struct fscrypt_name)); - fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen); - fname.disk_name.name = raw_inode->i_name; - - if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) { - WARN_ON(1); - err = -ENAMETOOLONG; + err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname); + if (err) goto out; - } retry: de = __f2fs_find_entry(dir, &fname, &page); if (de && inode->i_ino == le32_to_cpu(de->ino)) diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 6da162a71105a9ecc3494d765da014e300396482..017daaa26b503c98c36251aba518248e87badfad 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1219,7 +1219,7 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi, return err; } -static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi, +static void __insert_discard_tree(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t lstart, block_t start, block_t len, struct rb_node **insert_p, @@ -1228,7 +1228,6 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi, struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct rb_node **p; struct rb_node *parent = NULL; - struct discard_cmd *dc = NULL; bool leftmost = true; if (insert_p && insert_parent) { @@ -1240,12 +1239,8 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi, p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart, &leftmost); do_insert: - dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, + __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p, leftmost); - if 
(!dc) - return NULL; - - return dc; } static void __relocate_discard_cmd(struct discard_cmd_control *dcc, @@ -3109,6 +3104,14 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, type = CURSEG_COLD_DATA; } + /* + * We need to wait for node_write to avoid block allocation during + * checkpoint. This can only happen to quota writes which can cause + * the below discard race condition. + */ + if (IS_DATASEG(type)) + down_write(&sbi->node_write); + down_read(&SM_I(sbi)->curseg_lock); mutex_lock(&curseg->curseg_mutex); @@ -3174,6 +3177,9 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, up_read(&SM_I(sbi)->curseg_lock); + if (IS_DATASEG(type)) + up_write(&sbi->node_write); + if (put_pin_sem) up_read(&sbi->pin_sem); } diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index c476e7ca35957326ecbc29ba85d42895695e521f..1e6ff468fe6741efdfaf7f2ee6e700e4e4a1ef03 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * fs/f2fs/segment.h * diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index fa0a4ae4cf9674b153d7e5898fa8dab81cea519c..2efaac635ec5a5a9b52e1cecff890fefd2253e96 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -202,6 +202,7 @@ static match_table_t f2fs_tokens = { {Opt_whint, "whint_mode=%s"}, {Opt_alloc, "alloc_mode=%s"}, {Opt_fsync, "fsync_mode=%s"}, + {Opt_test_dummy_encryption, "test_dummy_encryption=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, {Opt_inlinecrypt, "inlinecrypt"}, {Opt_checkpoint_disable, "checkpoint=disable"}, @@ -285,6 +286,22 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi) F2FS_OPTION(sbi).s_resgid)); } +static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi) +{ + if (!F2FS_OPTION(sbi).unusable_cap_perc) + return; + + if (F2FS_OPTION(sbi).unusable_cap_perc == 100) + F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count; + else + F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) * + F2FS_OPTION(sbi).unusable_cap_perc; + + f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%", + F2FS_OPTION(sbi).unusable_cap, + F2FS_OPTION(sbi).unusable_cap_perc); +} + static void init_once(void *foo) { struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; @@ -395,7 +412,52 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi) } #endif -static int parse_options(struct super_block *sb, char *options) +static int f2fs_set_test_dummy_encryption(struct super_block *sb, + const char *opt, + const substring_t *arg, + bool is_remount) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); +#ifdef CONFIG_FS_ENCRYPTION + int err; + + if (!f2fs_sb_has_encrypt(sbi)) { + f2fs_err(sbi, "Encrypt feature is off"); + return -EINVAL; + } + + /* + * This mount option is just for testing, and it's not worthwhile to + * implement the extra complexity (e.g. RCU protection) that would be + * needed to allow it to be set or changed during remount. We do allow + * it to be specified during remount, but only if there is no change. 
+ */ + if (is_remount && !F2FS_OPTION(sbi).dummy_enc_ctx.ctx) { + f2fs_warn(sbi, "Can't set test_dummy_encryption on remount"); + return -EINVAL; + } + err = fscrypt_set_test_dummy_encryption( + sb, arg, &F2FS_OPTION(sbi).dummy_enc_ctx); + if (err) { + if (err == -EEXIST) + f2fs_warn(sbi, + "Can't change test_dummy_encryption on remount"); + else if (err == -EINVAL) + f2fs_warn(sbi, "Value of option \"%s\" is unrecognized", + opt); + else + f2fs_warn(sbi, "Error processing option \"%s\" [%d]", + opt, err); + return -EINVAL; + } + f2fs_warn(sbi, "Test dummy encryption mode enabled"); +#else + f2fs_warn(sbi, "Test dummy encryption mount option ignored"); +#endif + return 0; +} + +static int parse_options(struct super_block *sb, char *options, bool is_remount) { struct f2fs_sb_info *sbi = F2FS_SB(sb); substring_t args[MAX_OPT_ARGS]; @@ -404,9 +466,7 @@ static int parse_options(struct super_block *sb, char *options) int arg = 0, ext_cnt; kuid_t uid; kgid_t gid; -#ifdef CONFIG_QUOTA int ret; -#endif if (!options) return 0; @@ -428,11 +488,11 @@ static int parse_options(struct super_block *sb, char *options) if (!name) return -ENOMEM; - if (strlen(name) == 2 && !strncmp(name, "on", 2)) { + if (!strcmp(name, "on")) { F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; - } else if (strlen(name) == 3 && !strncmp(name, "off", 3)) { + } else if (!strcmp(name, "off")) { F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF; - } else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) { + } else if (!strcmp(name, "sync")) { F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC; } else { kvfree(name); @@ -592,16 +652,14 @@ static int parse_options(struct super_block *sb, char *options) if (!name) return -ENOMEM; - if (strlen(name) == 8 && - !strncmp(name, "adaptive", 8)) { + if (!strcmp(name, "adaptive")) { if (f2fs_sb_has_blkzoned(sbi)) { f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature"); kvfree(name); return -EINVAL; } F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE; - } else if (strlen(name) == 3 && - !strncmp(name, "lfs", 3)) { + } else if (!strcmp(name, "lfs")) { F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS; } else { kvfree(name); @@ -726,14 +784,11 @@ static int parse_options(struct super_block *sb, char *options) name = match_strdup(&args[0]); if (!name) return -ENOMEM; - if (strlen(name) == 10 && - !strncmp(name, "user-based", 10)) { + if (!strcmp(name, "user-based")) { F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER; - } else if (strlen(name) == 3 && - !strncmp(name, "off", 3)) { + } else if (!strcmp(name, "off")) { F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF; - } else if (strlen(name) == 8 && - !strncmp(name, "fs-based", 8)) { + } else if (!strcmp(name, "fs-based")) { F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS; } else { kvfree(name); @@ -746,11 +801,9 @@ static int parse_options(struct super_block *sb, char *options) if (!name) return -ENOMEM; - if (strlen(name) == 7 && - !strncmp(name, "default", 7)) { + if (!strcmp(name, "default")) { F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; - } else if (strlen(name) == 5 && - !strncmp(name, "reuse", 5)) { + } else if (!strcmp(name, "reuse")) { F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; } else { kvfree(name); @@ -762,14 +815,11 @@ static int parse_options(struct super_block *sb, char *options) name = match_strdup(&args[0]); if (!name) return -ENOMEM; - if (strlen(name) == 5 && - !strncmp(name, "posix", 5)) { + if (!strcmp(name, "posix")) { F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; - } else if (strlen(name) == 6 && - !strncmp(name, "strict", 6)) { + 
} else if (!strcmp(name, "strict")) { F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT; - } else if (strlen(name) == 9 && - !strncmp(name, "nobarrier", 9)) { + } else if (!strcmp(name, "nobarrier")) { F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_NOBARRIER; } else { @@ -779,17 +829,10 @@ static int parse_options(struct super_block *sb, char *options) kvfree(name); break; case Opt_test_dummy_encryption: -#ifdef CONFIG_FS_ENCRYPTION - if (!f2fs_sb_has_encrypt(sbi)) { - f2fs_err(sbi, "Encrypt feature is off"); - return -EINVAL; - } - - F2FS_OPTION(sbi).test_dummy_encryption = true; - f2fs_info(sbi, "Test dummy encryption mode enabled"); -#else - f2fs_info(sbi, "Test dummy encryption mount option ignored"); -#endif + ret = f2fs_set_test_dummy_encryption(sb, p, &args[0], + is_remount); + if (ret) + return ret; break; case Opt_inlinecrypt: #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT @@ -803,12 +846,7 @@ static int parse_options(struct super_block *sb, char *options) return -EINVAL; if (arg < 0 || arg > 100) return -EINVAL; - if (arg == 100) - F2FS_OPTION(sbi).unusable_cap = - sbi->user_block_count; - else - F2FS_OPTION(sbi).unusable_cap = - (sbi->user_block_count / 100) * arg; + F2FS_OPTION(sbi).unusable_cap_perc = arg; set_opt(sbi, DISABLE_CHECKPOINT); break; case Opt_checkpoint_disable_cap: @@ -831,15 +869,13 @@ static int parse_options(struct super_block *sb, char *options) name = match_strdup(&args[0]); if (!name) return -ENOMEM; - if (strlen(name) == 3 && !strcmp(name, "lzo")) { + if (!strcmp(name, "lzo")) { F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZO; - } else if (strlen(name) == 3 && - !strcmp(name, "lz4")) { + } else if (!strcmp(name, "lz4")) { F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4; - } else if (strlen(name) == 4 && - !strcmp(name, "zstd")) { + } else if (!strcmp(name, "zstd")) { F2FS_OPTION(sbi).compress_algorithm = COMPRESS_ZSTD; } else { @@ -1250,6 +1286,7 @@ static void f2fs_put_super(struct super_block *sb) for (i = 0; i < MAXQUOTAS; i++) kvfree(F2FS_OPTION(sbi).s_qf_names[i]); #endif + fscrypt_free_dummy_context(&F2FS_OPTION(sbi).dummy_enc_ctx); destroy_percpu_info(sbi); for (i = 0; i < NR_PAGE_TYPE; i++) kvfree(sbi->write_io[i]); @@ -1329,7 +1366,8 @@ static int f2fs_statfs_project(struct super_block *sb, limit >>= sb->s_blocksize_bits; if (limit && buf->f_blocks > limit) { - curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits; + curblock = (dquot->dq_dqb.dqb_curspace + + dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; buf->f_blocks = limit; buf->f_bfree = buf->f_bavail = (buf->f_blocks > curblock) ? 
@@ -1580,9 +1618,10 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) seq_printf(seq, ",whint_mode=%s", "user-based"); else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) seq_printf(seq, ",whint_mode=%s", "fs-based"); + + fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb); + #ifdef CONFIG_FS_ENCRYPTION - if (F2FS_OPTION(sbi).test_dummy_encryption) - seq_puts(seq, ",test_dummy_encryption"); if (F2FS_OPTION(sbi).inlinecrypt) seq_puts(seq, ",inlinecrypt"); #endif @@ -1614,7 +1653,6 @@ static void default_options(struct f2fs_sb_info *sbi) F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF; F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; - F2FS_OPTION(sbi).test_dummy_encryption = false; #ifdef CONFIG_FS_ENCRYPTION F2FS_OPTION(sbi).inlinecrypt = false; #endif @@ -1776,7 +1814,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) default_options(sbi); /* parse mount options */ - err = parse_options(sb, data); + err = parse_options(sb, data, true); if (err) goto restore_opts; checkpoint_changed = @@ -1887,6 +1925,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0); limit_reserve_root(sbi); + adjust_unusable_cap_perc(sbi); *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); return 0; restore_gc: @@ -2453,9 +2492,10 @@ static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len, ctx, len, fs_data, XATTR_CREATE); } -static bool f2fs_dummy_context(struct inode *inode) +static const union fscrypt_context * +f2fs_get_dummy_context(struct super_block *sb) { - return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode)); + return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_ctx.ctx; } static bool f2fs_has_stable_inodes(struct super_block *sb) @@ -2498,7 +2538,7 @@ static const struct fscrypt_operations f2fs_cryptops = { .key_prefix = "f2fs:", .get_context = f2fs_get_context, .set_context = f2fs_set_context, - .dummy_context = f2fs_dummy_context, + .get_dummy_context = f2fs_get_dummy_context, .empty_dir = f2fs_empty_dir, .max_namelen = F2FS_NAME_LEN, .has_stable_inodes = f2fs_has_stable_inodes, @@ -3090,7 +3130,7 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi) if (nr_sectors & (bdev_zone_sectors(bdev) - 1)) FDEV(devi).nr_blkz++; - FDEV(devi).blkz_seq = f2fs_kzalloc(sbi, + FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi, BITS_TO_LONGS(FDEV(devi).nr_blkz) * sizeof(unsigned long), GFP_KERNEL); @@ -3450,7 +3490,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) goto free_sb_buf; } - err = parse_options(sb, options); + err = parse_options(sb, options, false); if (err) goto free_options; @@ -3497,7 +3537,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) init_rwsem(&sbi->gc_lock); mutex_init(&sbi->writepages); mutex_init(&sbi->cp_mutex); - mutex_init(&sbi->resize_mutex); init_rwsem(&sbi->node_write); init_rwsem(&sbi->node_change); @@ -3606,6 +3645,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) sbi->reserved_blocks = 0; sbi->current_reserved_blocks = 0; limit_reserve_root(sbi); + adjust_unusable_cap_perc(sbi); for (i = 0; i < NR_INODE_TYPE; i++) { INIT_LIST_HEAD(&sbi->inode_list[i]); @@ -3842,6 +3882,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) for (i = 0; i < MAXQUOTAS; i++) kvfree(F2FS_OPTION(sbi).s_qf_names[i]); #endif + fscrypt_free_dummy_context(&F2FS_OPTION(sbi).dummy_enc_ctx); kvfree(options); 
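
The checkpoint=disable:x% handling above now stores only the percentage (unusable_cap_perc) at parse time and converts it into a block count in adjust_unusable_cap_perc() once sbi->user_block_count is known, repeating the conversion on remount. A minimal userspace sketch of that integer arithmetic, with hypothetical variable names, showing why the 100% case is special-cased rather than relying on (count / 100) * 100:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for sbi->user_block_count and unusable_cap_perc. */
static uint32_t unusable_cap(uint32_t user_block_count, uint32_t perc)
{
	if (perc == 100)
		return user_block_count;	/* avoid losing blocks to integer division */
	return (user_block_count / 100) * perc;
}

int main(void)
{
	uint32_t blocks = 1048573;	/* deliberately not a multiple of 100 */

	printf("50%%  -> %u blocks\n", unusable_cap(blocks, 50));
	printf("100%% (special case) -> %u blocks\n", unusable_cap(blocks, 100));
	printf("100%% (naive math)   -> %u blocks\n", (blocks / 100) * 100);
	return 0;
}

With 1048573 blocks the naive computation reports 1048500, i.e. truncation silently drops blocks, which is exactly what the == 100 branch in the patch avoids.
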
free_sb_buf: kvfree(raw_super); @@ -3963,7 +4004,12 @@ static int __init init_f2fs_fs(void) err = f2fs_init_bioset(); if (err) goto free_bio_enrty_cache; + err = f2fs_init_compress_mempool(); + if (err) + goto free_bioset; return 0; +free_bioset: + f2fs_destroy_bioset(); free_bio_enrty_cache: f2fs_destroy_bio_entry_cache(); free_post_read: @@ -3991,6 +4037,7 @@ static int __init init_f2fs_fs(void) static void __exit exit_f2fs_fs(void) { + f2fs_destroy_compress_mempool(); f2fs_destroy_bioset(); f2fs_destroy_bio_entry_cache(); f2fs_destroy_post_read_processing(); diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index 70893a98c0e96772c51d3655c487f8b107437b3c..a647a2fa947757ef4e3ec69a71ffd32e7f455f25 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -457,6 +457,7 @@ enum feat_id { FEAT_SB_CHECKSUM, FEAT_CASEFOLD, FEAT_COMPRESSION, + FEAT_TEST_DUMMY_ENCRYPTION_V2, }; static ssize_t f2fs_feature_show(struct f2fs_attr *a, @@ -477,6 +478,7 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a, case FEAT_SB_CHECKSUM: case FEAT_CASEFOLD: case FEAT_COMPRESSION: + case FEAT_TEST_DUMMY_ENCRYPTION_V2: return sprintf(buf, "supported\n"); } return 0; @@ -556,6 +558,7 @@ F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate); F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type); #endif F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, data_io_flag, data_io_flag); +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, node_io_flag, node_io_flag); F2FS_GENERAL_RO_ATTR(dirty_segments); F2FS_GENERAL_RO_ATTR(free_segments); F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes); @@ -576,6 +579,7 @@ F2FS_GENERAL_RO_ATTR(avg_vblocks); #ifdef CONFIG_FS_ENCRYPTION F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO); +F2FS_FEATURE_RO_ATTR(test_dummy_encryption_v2, FEAT_TEST_DUMMY_ENCRYPTION_V2); #endif #ifdef CONFIG_BLK_DEV_ZONED F2FS_FEATURE_RO_ATTR(block_zoned, FEAT_BLKZONED); @@ -637,6 +641,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(inject_type), #endif ATTR_LIST(data_io_flag), + ATTR_LIST(node_io_flag), ATTR_LIST(dirty_segments), ATTR_LIST(free_segments), ATTR_LIST(unusable), @@ -661,6 +666,7 @@ static struct attribute *f2fs_attrs[] = { static struct attribute *f2fs_feat_attrs[] = { #ifdef CONFIG_FS_ENCRYPTION ATTR_LIST(encryption), + ATTR_LIST(test_dummy_encryption_v2), #endif #ifdef CONFIG_BLK_DEV_ZONED ATTR_LIST(block_zoned), @@ -803,6 +809,7 @@ static int __maybe_unused iostat_info_seq_show(struct seq_file *seq, seq_printf(seq, "time: %-16llu\n", now); /* print app write IOs */ + seq_puts(seq, "[WRITE]\n"); seq_printf(seq, "app buffered: %-16llu\n", sbi->rw_iostat[APP_BUFFERED_IO]); seq_printf(seq, "app direct: %-16llu\n", @@ -829,6 +836,7 @@ static int __maybe_unused iostat_info_seq_show(struct seq_file *seq, sbi->rw_iostat[FS_CP_META_IO]); /* print app read IOs */ + seq_puts(seq, "[READ]\n"); seq_printf(seq, "app buffered: %-16llu\n", sbi->rw_iostat[APP_BUFFERED_READ_IO]); seq_printf(seq, "app direct: %-16llu\n", @@ -839,12 +847,17 @@ static int __maybe_unused iostat_info_seq_show(struct seq_file *seq, /* print fs read IOs */ seq_printf(seq, "fs data: %-16llu\n", sbi->rw_iostat[FS_DATA_READ_IO]); + seq_printf(seq, "fs gc data: %-16llu\n", + sbi->rw_iostat[FS_GDATA_READ_IO]); + seq_printf(seq, "fs compr_data: %-16llu\n", + sbi->rw_iostat[FS_CDATA_READ_IO]); seq_printf(seq, "fs node: %-16llu\n", sbi->rw_iostat[FS_NODE_READ_IO]); seq_printf(seq, "fs meta: %-16llu\n", sbi->rw_iostat[FS_META_READ_IO]); /* print other IOs */ + seq_puts(seq, "[OTHER]\n"); seq_printf(seq, "fs discard: %-16llu\n", 
sbi->rw_iostat[FS_DISCARD]); diff --git a/fs/f2fs/trace.h b/fs/f2fs/trace.h index e8075fc5b2284e9a00564d4d416c4115ef1df566..789f6aa727fcf88a0406937ed6238f87298bbe84 100644 --- a/fs/f2fs/trace.h +++ b/fs/f2fs/trace.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * f2fs IO tracer * diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h index 938fcd20565dccfdaa3a37a3cdd64abe4e1ab831..416d652774a33a39b06e9329535ac7c8e81ad0ca 100644 --- a/fs/f2fs/xattr.h +++ b/fs/f2fs/xattr.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * fs/f2fs/xattr.h * @@ -136,6 +136,7 @@ extern void f2fs_destroy_xattr_caches(struct f2fs_sb_info *); #else #define f2fs_xattr_handlers NULL +#define f2fs_listxattr NULL static inline int f2fs_setxattr(struct inode *inode, int index, const char *name, const void *value, size_t size, struct page *page, int flags) @@ -148,11 +149,6 @@ static inline int f2fs_getxattr(struct inode *inode, int index, { return -EOPNOTSUPP; } -static inline ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, - size_t buffer_size) -{ - return -EOPNOTSUPP; -} static inline int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi) { return 0; } static inline void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi) { } #endif diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 154728d207c343462bde4f38665e52681af228e4..3f49ab3c188466c777804ac68ed49596b89ea206 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -1510,6 +1510,12 @@ static int fat_read_bpb(struct super_block *sb, struct fat_boot_sector *b, goto out; } + if (bpb->fat_fat_length == 0 && bpb->fat32_length == 0) { + if (!silent) + fat_msg(sb, KERN_ERR, "bogus number of FAT sectors"); + goto out; + } + error = 0; out: diff --git a/fs/file.c b/fs/file.c index 0c25b980affe20c76b9d3a8abca3a595d6fab9e4..97c6f0df39daeb7f3a3059ec113913a9324dada6 100644 --- a/fs/file.c +++ b/fs/file.c @@ -75,7 +75,7 @@ static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, */ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) { - unsigned int cpy, set; + size_t cpy, set; BUG_ON(nfdt->max_fds < ofdt->max_fds); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 7aea28b7f70e5b6f6b1ee78fbb02e51c1551e5f0..aa5ff287834a1142950e766cc609e4ac17772f8b 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -270,6 +270,7 @@ void __inode_attach_wb(struct inode *inode, struct page *page) if (unlikely(cmpxchg(&inode->i_wb, NULL, wb))) wb_put(wb); } +EXPORT_SYMBOL_GPL(__inode_attach_wb); /** * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it diff --git a/fs/fuse/file.c b/fs/fuse/file.c index c551e1451a7b728fa93df0c32ec3fc556b605d05..25458a78d24fbe7c346a04e7b6271d9264574ef6 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -17,6 +17,7 @@ #include #include #include +#include static const struct file_operations fuse_direct_io_file_operations; @@ -2534,7 +2535,16 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, struct iovec *iov = iov_page; iov->iov_base = (void __user *)arg; - iov->iov_len = _IOC_SIZE(cmd); + + switch (cmd) { + case FS_IOC_GETFLAGS: + case FS_IOC_SETFLAGS: + iov->iov_len = sizeof(int); + break; + default: + iov->iov_len = _IOC_SIZE(cmd); + break; + } if (_IOC_DIR(cmd) & _IOC_WRITE) { in_iov = iov; diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 1e2ff4b32c79aea44981c0e8cc926b32640dea8f..aea1ed0aebd0f6a615b8118775583e7e8e21ad43 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c 
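
The fs/file.c change above widens cpy and set in copy_fdtable() from unsigned int to size_t. The byte counts there come from multiplying a descriptor count by the size of a pointer (and of the bitmap words), so on a 64-bit kernel a very large table can push the product past 32 bits. A hedged, standalone illustration of that overflow pattern; the fd count and sizes here are made up, only the arithmetic mirrors the shape of the kernel code:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	unsigned int max_fds = 600u * 1000 * 1000;	/* large but representable fd count */

	/* 32-bit arithmetic: 600e6 * 8 wraps around 2^32. */
	unsigned int cpy32 = max_fds * (unsigned int)sizeof(void *);
	/* size_t arithmetic on a 64-bit build keeps the full product. */
	size_t cpy64 = (size_t)max_fds * sizeof(void *);

	printf("unsigned int: %u bytes\n", cpy32);
	printf("size_t      : %zu bytes\n", cpy64);
	return 0;
}

On a typical 64-bit build the first line prints a wrapped value (about 505 MB instead of 4.8 GB), which is the kind of short copy the size_t conversion is meant to rule out.
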
@@ -636,9 +636,6 @@ __acquires(&gl->gl_lockref.lock) goto out_unlock; if (nonblock) goto out_sched; - smp_mb(); - if (atomic_read(&gl->gl_revokes) != 0) - goto out_sched; set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); gl->gl_target = gl->gl_demote_state; diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index a3208511f35aadcb44ec88673b558950a76d2efb..f30418911e1bde09eca0b1bc68f38d0b887b51cf 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -804,8 +804,10 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, * @new: New transaction to be merged */ -static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new) +static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new) { + struct gfs2_trans *old = sdp->sd_log_tr; + WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags)); old->tr_num_buf_new += new->tr_num_buf_new; @@ -817,6 +819,11 @@ static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new) list_splice_tail_init(&new->tr_databuf, &old->tr_databuf); list_splice_tail_init(&new->tr_buf, &old->tr_buf); + + spin_lock(&sdp->sd_ail_lock); + list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list); + list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list); + spin_unlock(&sdp->sd_ail_lock); } static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) @@ -828,7 +835,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) gfs2_log_lock(sdp); if (sdp->sd_log_tr) { - gfs2_merge_trans(sdp->sd_log_tr, tr); + gfs2_merge_trans(sdp, tr); } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) { gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags)); sdp->sd_log_tr = tr; diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 057be88eb1b42cd3113c691ec63c82b802517669..2de67588ac2d8694a1226cc4d43dcccf9ac436a2 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -922,7 +922,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo) } static const match_table_t nolock_tokens = { - { Opt_jid, "jid=%d\n", }, + { Opt_jid, "jid=%d", }, { Opt_err, NULL }, }; @@ -1179,7 +1179,17 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent goto fail_per_node; } - if (!sb_rdonly(sb)) { + if (sb_rdonly(sb)) { + struct gfs2_holder freeze_gh; + + error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, + GL_EXACT, &freeze_gh); + if (error) { + fs_err(sdp, "can't make FS RO: %d\n", error); + goto fail_per_node; + } + gfs2_glock_dq_uninit(&freeze_gh); + } else { error = gfs2_make_fs_rw(sdp); if (error) { fs_err(sdp, "can't make FS RW: %d\n", error); diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index e700fb16266474e4a39708244ddecb26ebb23367..a833e2e0716754d7df90eba16d4e9f7388c21c1d 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -1039,8 +1039,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) u32 x; int error = 0; - if (capable(CAP_SYS_RESOURCE) || - sdp->sd_args.ar_quota != GFS2_QUOTA_ON) + if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON) return 0; error = gfs2_quota_hold(ip, uid, gid); diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h index 836f29480be6719c61dc2380a42b8556c6c88a99..e3a6e2404d11d231cc726bfcc55fee06dedcfa9b 100644 --- a/fs/gfs2/quota.h +++ b/fs/gfs2/quota.h @@ -47,7 +47,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip, int ret; ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */ - if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) + if (capable(CAP_SYS_RESOURCE) 
|| + sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) return 0; ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); if (ret) diff --git a/fs/incfs/vfs.c b/fs/incfs/vfs.c index b5ec4edc3b7a42a0af1fb1a4d5f5a225a5f53dae..ea29534bea53da58ec78340568c96a58da261daa 100644 --- a/fs/incfs/vfs.c +++ b/fs/incfs/vfs.c @@ -2180,7 +2180,7 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags, sb->s_op = &incfs_super_ops; sb->s_d_op = &incfs_dentry_ops; sb->s_flags |= S_NOATIME; - sb->s_magic = INCFS_MAGIC_NUMBER; + sb->s_magic = (long)INCFS_MAGIC_NUMBER; sb->s_time_gran = 1; sb->s_blocksize = INCFS_DATA_FILE_BLOCK_SIZE; sb->s_blocksize_bits = blksize_bits(sb->s_blocksize); diff --git a/fs/libfs.c b/fs/libfs.c index 4f2ac9ac0c9a8dccf613e4cb7c8a5e8429f2492a..aec2f4570e814c445de0dd6ad6481873ac0c850c 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -1240,11 +1240,27 @@ int generic_ci_d_compare(const struct dentry *dentry, unsigned int len, const struct super_block *sb = dentry->d_sb; const struct unicode_map *um = sb->s_encoding; struct qstr entry = QSTR_INIT(str, len); + char strbuf[DNAME_INLINE_LEN]; int ret; if (!inode || !needs_casefold(inode)) goto fallback; + /* + * If the dentry name is stored in-line, then it may be concurrently + * modified by a rename. If this happens, the VFS will eventually retry + * the lookup, so it doesn't matter what ->d_compare() returns. + * However, it's unsafe to call utf8_strncasecmp() with an unstable + * string. Therefore, we have to copy the name into a temporary buffer. + */ + if (len <= DNAME_INLINE_LEN - 1) { + memcpy(strbuf, str, len); + strbuf[len] = 0; + entry.name = strbuf; + /* prevent compiler from optimizing out the temporary buffer */ + barrier(); + } + ret = utf8_strncasecmp(um, name, &entry); if (ret >= 0) return ret; diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 8dbde5ded04252c03d69f47aa3be210e03165e60..74f15498c9bfd97277a3a66c9631b7d9286cdf95 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -921,9 +921,8 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio, goto out_mds; /* Use a direct mapping of ds_idx to pgio mirror_idx */ - if (WARN_ON_ONCE(pgio->pg_mirror_count != - FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))) - goto out_mds; + if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)) + goto out_eagain; for (i = 0; i < pgio->pg_mirror_count; i++) { ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true); @@ -942,11 +941,15 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio, } return; - +out_eagain: + pnfs_generic_pg_cleanup(pgio); + pgio->pg_error = -EAGAIN; + return; out_mds: pnfs_put_lseg(pgio->pg_lseg); pgio->pg_lseg = NULL; nfs_pageio_reset_write_mds(pgio); + pgio->pg_error = -EAGAIN; } static unsigned int diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 4d45786738ab442104c4bbd23cd6604d836c0e7f..a19bbcfab7c5e0267df8e33d8b96d0bf85b3843d 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7309,7 +7309,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) } static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { - .rpc_call_done = &nfs4_bind_one_conn_to_session_done, + .rpc_call_done = nfs4_bind_one_conn_to_session_done, }; /* diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 80aeb19b176b2e67413dadefd99544adbeb407cd..22b784e7ef50bee4e56a39f8e1970c3d608612ba 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ 
-1161,6 +1161,8 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) err = setup_callback_client(clp, &conn, ses); if (err) { nfsd4_mark_cb_down(clp, err); + if (c) + svc_xprt_put(c->cn_xprt); return; } } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 06d1f2edf2ec6a4a77bc8f9d19b5afbcce64293e..a64065ad8851d1bfa539d29b47355424a8f80b48 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1202,6 +1202,9 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp, iap->ia_mode = 0; iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type; + if (!IS_POSIXACL(dirp)) + iap->ia_mode &= ~current_umask(); + err = 0; host_err = 0; switch (type) { @@ -1413,6 +1416,9 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, goto out; } + if (!IS_POSIXACL(dirp)) + iap->ia_mode &= ~current_umask(); + host_err = vfs_create(dirp, dchild, iap->ia_mode, true); if (host_err < 0) { fh_drop_write(fhp); diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 5f229f5ea0e0f18ca2e116b063095cd6e89fe0cf..3ac4b2b72593ca412ffa210381746415c15f21be 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2790,6 +2790,8 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) if (!nilfs->ns_writer) return -ENOMEM; + inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL); + err = nilfs_segctor_start_thread(nilfs->ns_writer); if (err) { kfree(nilfs->ns_writer); diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 5bb4a89f9045301161d51ce3d3d471110d67af0c..0773c774e2bfe4f3f16b511e102cdb1efee7dfee 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -303,7 +303,7 @@ #define OCFS2_MAX_SLOTS 255 /* Slot map indicator for an empty slot */ -#define OCFS2_INVALID_SLOT -1 +#define OCFS2_INVALID_SLOT ((u16)-1) #define OCFS2_VOL_UUID_LEN 16 #define OCFS2_MAX_VOL_LABEL_LEN 64 @@ -339,8 +339,8 @@ struct ocfs2_system_inode_info { enum { BAD_BLOCK_SYSTEM_INODE = 0, GLOBAL_INODE_ALLOC_SYSTEM_INODE, +#define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE, -#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE HEARTBEAT_SYSTEM_INODE, GLOBAL_BITMAP_SYSTEM_INODE, USER_QUOTA_SYSTEM_INODE, diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 71f22c8fbffd49305fa147c4fafae30ddac31c13..4ca2f71565f9a5c863d273f3d5ade39dc79a8910 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -2891,9 +2891,12 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) goto bail; } - inode_alloc_inode = - ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE, - suballoc_slot); + if (suballoc_slot == (u16)OCFS2_INVALID_SLOT) + inode_alloc_inode = ocfs2_get_system_file_inode(osb, + GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot); + else + inode_alloc_inode = ocfs2_get_system_file_inode(osb, + INODE_ALLOC_SYSTEM_INODE, suballoc_slot); if (!inode_alloc_inode) { /* the error code could be inaccurate, but we are not able to * get the correct one. 
*/ diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index c147a3a36fbff4b66650932e70b9fefe602a738e..4a2603841d77aec6569e46fb2574bd08cfae342e 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -59,7 +59,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) { ssize_t list_size, size, value_size = 0; char *buf, *name, *value = NULL; - int uninitialized_var(error); + int error = 0; size_t slen; if (!(old->d_inode->i_opflags & IOP_XATTR) || diff --git a/fs/proc/inode.c b/fs/proc/inode.c index c5f154be0ab29e2041d851c306b24c5bde3a41a6..a6632f5c6cb8cda4076f01423612b8dc28e60dfa 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -432,7 +432,7 @@ const struct inode_operations proc_link_inode_operations = { struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) { - struct inode *inode = new_inode_pseudo(sb); + struct inode *inode = new_inode(sb); if (inode) { inode->i_ino = de->low_ino; diff --git a/fs/proc/self.c b/fs/proc/self.c index 31326bb23b8b472d9b69990e0c99c68c3f2ca33b..398cdf9a9f0c67e22ba9c22f27405525b46e5a75 100644 --- a/fs/proc/self.c +++ b/fs/proc/self.c @@ -41,7 +41,7 @@ int proc_setup_self(struct super_block *s) inode_lock(root_inode); self = d_alloc_name(s->s_root, "self"); if (self) { - struct inode *inode = new_inode_pseudo(s); + struct inode *inode = new_inode(s); if (inode) { inode->i_ino = self_inum; inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c index b813e3b529f266696d45670766ef97203806f090..c6cd35e5ef5d55cccad1ac986b0011165e1ecca2 100644 --- a/fs/proc/thread_self.c +++ b/fs/proc/thread_self.c @@ -42,7 +42,7 @@ int proc_setup_thread_self(struct super_block *s) inode_lock(root_inode); thread_self = d_alloc_name(s->s_root, "thread-self"); if (thread_self) { - struct inode *inode = new_inode_pseudo(s); + struct inode *inode = new_inode(s); if (inode) { inode->i_ino = thread_self_inum; inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); diff --git a/fs/verity/enable.c b/fs/verity/enable.c index 15e7d14ec2ffc7e09b77dff8b235b351baa2f3f5..d734cebaae701ab1fe9fe99818fb68c6250ea767 100644 --- a/fs/verity/enable.c +++ b/fs/verity/enable.c @@ -329,6 +329,8 @@ static int enable_verity(struct file *filp, /** * fsverity_ioctl_enable() - enable verity on a file + * @filp: file to enable verity on + * @uarg: user pointer to fsverity_enable_arg * * Enable fs-verity on a file. See the "FS_IOC_ENABLE_VERITY" section of * Documentation/filesystems/fsverity.rst for the documentation. 
diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h index 4b2c8aed0563103ad04f8870a8db7e95f13a51cc..1ce38fded6fd6bbc43351ee8bcf762083fae0080 100644 --- a/fs/verity/fsverity_private.h +++ b/fs/verity/fsverity_private.h @@ -61,7 +61,7 @@ struct merkle_tree_params { u64 level_start[FS_VERITY_MAX_LEVELS]; }; -/** +/* * fsverity_info - cached verity metadata for an inode * * When a verity file is first opened, an instance of this struct is allocated @@ -134,7 +134,7 @@ void __init fsverity_check_hash_algs(void); /* init.c */ -extern void __printf(3, 4) __cold +void __printf(3, 4) __cold fsverity_msg(const struct inode *inode, const char *level, const char *fmt, ...); diff --git a/fs/verity/measure.c b/fs/verity/measure.c index 05049b68c74553fb6b865818c989b00e5c2da36e..df409a5682edf9a568219d541a0902e9abb91bb9 100644 --- a/fs/verity/measure.c +++ b/fs/verity/measure.c @@ -11,6 +11,8 @@ /** * fsverity_ioctl_measure() - get a verity file's measurement + * @filp: file to get measurement of + * @_uarg: user pointer to fsverity_digest * * Retrieve the file measurement that the kernel is enforcing for reads from a * verity file. See the "FS_IOC_MEASURE_VERITY" section of diff --git a/fs/verity/open.c b/fs/verity/open.c index 25b29065d897cb558105ec725211eb46a39a342d..71c5dc279b2035d48bf20f9f615eb905845ba40a 100644 --- a/fs/verity/open.c +++ b/fs/verity/open.c @@ -330,6 +330,7 @@ EXPORT_SYMBOL_GPL(fsverity_prepare_setattr); /** * fsverity_cleanup_inode() - free the inode's verity info, if present + * @inode: an inode being evicted * * Filesystems must call this on inode eviction to free ->i_verity_info. */ diff --git a/fs/verity/signature.c b/fs/verity/signature.c index 3dfc56f2cdf1207b15acba0cf6f24e471661f04a..9cfd2f5ff57dd4ed55b6d033ee15ee89173e964e 100644 --- a/fs/verity/signature.c +++ b/fs/verity/signature.c @@ -28,6 +28,9 @@ static struct key *fsverity_keyring; /** * fsverity_verify_signature() - check a verity file's signature + * @vi: the file's fsverity_info + * @desc: the file's fsverity_descriptor + * @desc_size: size of @desc * * If the file's fs-verity descriptor includes a signature of the file * measurement, verify it against the certificates in the fs-verity keyring. diff --git a/fs/verity/verify.c b/fs/verity/verify.c index 5324270cd7d474f1506c481a9b613568e722717b..fb4c745087ec720f958d6d25b53c571f29b725ed 100644 --- a/fs/verity/verify.c +++ b/fs/verity/verify.c @@ -179,6 +179,7 @@ static bool verify_page(struct inode *inode, const struct fsverity_info *vi, /** * fsverity_verify_page() - verify a data page + * @page: the page to verity * * Verify a page that has just been read from a verity file. The page must be a * pagecache page that is still locked and not yet uptodate. @@ -206,6 +207,7 @@ EXPORT_SYMBOL_GPL(fsverity_verify_page); #ifdef CONFIG_BLOCK /** * fsverity_verify_bio() - verify a 'read' bio that has just completed + * @bio: the bio to verify * * Verify a set of pages that have just been read from a verity file. The pages * must be pagecache pages that are still locked and not yet uptodate. Pages @@ -264,6 +266,7 @@ EXPORT_SYMBOL_GPL(fsverity_verify_bio); /** * fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue + * @work: the work to enqueue * * Enqueue verification work for asynchronous processing. 
*/ diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 516e0c57cf9c4257e8e3389bc98ec199840dd814..a10d9a3c181e4534216214d33830b515fb6b3bf9 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -2529,6 +2529,13 @@ xfs_agf_verify( be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp))) return false; + if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks) + return false; + + if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) || + be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length)) + return false; + if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 || be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 || be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS || @@ -2540,6 +2547,10 @@ xfs_agf_verify( be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS)) return false; + if (xfs_sb_version_hasrmapbt(&mp->m_sb) && + be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length)) + return false; + /* * during growfs operations, the perag is not fully initialised, * so we can't use it for any useful checking. growfs ensures we can't @@ -2553,6 +2564,11 @@ xfs_agf_verify( be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length)) return false; + if (xfs_sb_version_hasreflink(&mp->m_sb) && + be32_to_cpu(agf->agf_refcount_blocks) > + be32_to_cpu(agf->agf_length)) + return false; + if (xfs_sb_version_hasreflink(&mp->m_sb) && (be32_to_cpu(agf->agf_refcount_level) < 1 || be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS)) diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index 5d2add1a6c964870b212f848879338f7b9b3ba18..864fcfa1df4130fdfdcca037dbc7214c15ff08b2 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h @@ -51,7 +51,7 @@ #ifdef CONFIG_NEED_MULTIPLE_NODES #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask) #else - #define cpumask_of_node(node) ((void)node, cpu_online_mask) + #define cpumask_of_node(node) ((void)(node), cpu_online_mask) #endif #endif #ifndef pcibus_to_node diff --git a/include/crypto/ice.h b/include/crypto/ice.h index 20d2f21f4a5a8b186033e1d35f9661e609df4b00..fb66aa3c4708123070d1c493b5e6141807616ff9 100644 --- a/include/crypto/ice.h +++ b/include/crypto/ice.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -69,6 +69,8 @@ struct platform_device *qcom_ice_get_pdevice(struct device_node *node); #ifdef CONFIG_CRYPTO_DEV_QCOM_ICE int qcom_ice_setup_ice_hw(const char *storage_type, int enable); void qcom_ice_set_fde_flag(int flag); +int qcom_ice_config_start(struct request *req, + struct ice_data_setting *setting); #else static inline int qcom_ice_setup_ice_hw(const char *storage_type, int enable) { @@ -79,12 +81,10 @@ static inline void qcom_ice_set_fde_flag(int flag) {} struct qcom_ice_variant_ops { const char *name; - int (*init)(struct platform_device *, void *, ice_error_cb); int (*reset)(struct platform_device *); int (*resume)(struct platform_device *); int (*suspend)(struct platform_device *); - int (*config_start)(struct platform_device *, struct request *, - struct ice_data_setting *, bool); + int (*config_start)(struct request *, struct ice_data_setting *); int (*config_end)(struct request *); int (*status)(struct platform_device *); void (*debug)(struct platform_device *); diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index ac0eae8372ab64fd458d75885a45a023b4ddc62c..2ea6a95ca825bcece2198b0ac34bb45ad9fba6cc 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -34,8 +34,8 @@ struct alg_sock { struct sock *parent; - unsigned int refcnt; - unsigned int nokey_refcnt; + atomic_t refcnt; + atomic_t nokey_refcnt; const struct af_alg_type *type; void *private; diff --git a/include/dt-bindings/thermal/qmi_thermal.h b/include/dt-bindings/thermal/qmi_thermal.h index d41157d9d73c632faa2b318fe2f9f4a696c6b03a..897683568cb826b20624f6df808b58cfa3c64564 100644 --- a/include/dt-bindings/thermal/qmi_thermal.h +++ b/include/dt-bindings/thermal/qmi_thermal.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -39,6 +39,12 @@ #define QMI_SYS_THERM_1 23 #define QMI_SYS_THERM_2 24 #define QMI_MODEM_TSENS_1 25 +#define QMI_QFE_RET_PA_0_FR1 26 +#define QMI_QFE_WTR_PA_0_FR1 27 +#define QMI_QFE_WTR_PA_1_FR1 28 +#define QMI_QFE_WTR_PA_2_FR1 29 +#define QMI_QFE_WTR_PA_3_FR1 30 + #define QMI_MODEM_INST_ID 0x0 #define QMI_ADSP_INST_ID 0x1 diff --git a/include/linux/bio-crypt-ctx.h b/include/linux/bio-crypt-ctx.h index 45d331bcc2e4386782ae974f63cb1802715ed7cd..efb3c884d3300d490af1ecfe208d63975280537b 100644 --- a/include/linux/bio-crypt-ctx.h +++ b/include/linux/bio-crypt-ctx.h @@ -106,6 +106,7 @@ struct bio_crypt_ctx { * with keyslot. */ struct keyslot_manager *bc_ksm; + bool is_ext4; }; int bio_crypt_ctx_init(void); @@ -132,6 +133,7 @@ static inline void bio_crypt_set_ctx(struct bio *bio, memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun)); bc->bc_ksm = NULL; bc->bc_keyslot = -1; + bc->is_ext4 = 0; bio->bi_crypt_context = bc; } diff --git a/include/linux/bitops.h b/include/linux/bitops.h index c51574fab0b009e728ec74a1e65aa5218b1bc1c2..00dcb1bad76b8e6ecc44175a7f2ce7cd9f808f04 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -50,7 +50,7 @@ static inline int get_bitmask_order(unsigned int count) static __always_inline unsigned long hweight_long(unsigned long w) { - return sizeof(w) == 4 ? hweight32(w) : hweight64(w); + return sizeof(w) == 4 ? 
hweight32(w) : hweight64((__u64)w); } /** diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6ba2db771182184816079cbc5f7f0202e62f4f96..d6215795604cfdeee7a4e3d863f856310b7405e3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -574,7 +574,7 @@ struct request_queue { unsigned int sg_reserved_size; int node; #ifdef CONFIG_BLK_DEV_IO_TRACE - struct blk_trace *blk_trace; + struct blk_trace __rcu *blk_trace; struct mutex blk_trace_mutex; #endif /* diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 7bb2d8de9f308e367bcda4a5484f67483f8fc8e5..3b6ff5902edce65c9ec1dc1ccb0497b0ac521194 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f **/ #define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \ do { \ - struct blk_trace *bt = (q)->blk_trace; \ + struct blk_trace *bt; \ + \ + rcu_read_lock(); \ + bt = rcu_dereference((q)->blk_trace); \ if (unlikely(bt)) \ __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\ + rcu_read_unlock(); \ } while (0) #define blk_add_trace_msg(q, fmt, ...) \ blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__) @@ -61,10 +65,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f static inline bool blk_trace_note_message_enabled(struct request_queue *q) { - struct blk_trace *bt = q->blk_trace; - if (likely(!bt)) - return false; - return bt->act_mask & BLK_TC_NOTIFY; + struct blk_trace *bt; + bool ret; + + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + ret = bt && (bt->act_mask & BLK_TC_NOTIFY); + rcu_read_unlock(); + return ret; } extern void blk_add_driver_data(struct request_queue *q, struct request *rq, diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index b7773937874669c6f7fc0218dc68573d5ebd2ad8..2604f8750f54661133a620d310ef7bef4f17d2c6 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -689,7 +689,9 @@ struct sock_cgroup_data { union { #ifdef __LITTLE_ENDIAN struct { - u8 is_data; + u8 is_data : 1; + u8 no_refcnt : 1; + u8 unused : 6; u8 padding; u16 prioidx; u32 classid; @@ -699,7 +701,9 @@ struct sock_cgroup_data { u32 classid; u16 prioidx; u8 padding; - u8 is_data; + u8 unused : 6; + u8 no_refcnt : 1; + u8 is_data : 1; } __packed; #endif u64 val; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index a22949de5b4047a06b41c0459a982a61557a966a..05ee3c0ad3e4c6949bd67037a91560e0f898bbac 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -729,6 +729,7 @@ extern spinlock_t cgroup_sk_update_lock; void cgroup_sk_alloc_disable(void); void cgroup_sk_alloc(struct sock_cgroup_data *skcd); +void cgroup_sk_clone(struct sock_cgroup_data *skcd); void cgroup_sk_free(struct sock_cgroup_data *skcd); static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) @@ -742,7 +743,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) */ v = READ_ONCE(skcd->val); - if (v & 1) + if (v & 3) return &cgrp_dfl_root.cgrp; return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp; @@ -754,6 +755,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) #else /* CONFIG_CGROUP_DATA */ static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {} +static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {} static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {} #endif /* CONFIG_CGROUP_DATA */ diff --git 
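
The sock_cgroup_data changes above carve a second flag (no_refcnt) out of what used to be a whole is_data byte, and sock_cgroup_ptr() now masks both low bits (v & 3) before treating val as a cgroup pointer. A rough userspace sketch of that union-with-bitfields idea; bitfield ordering is implementation-defined, so this only mirrors the little-endian arm of the patch, and the field names are copied purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Little-endian arm of the union in the patch, rebuilt with fixed-width types. */
union skcd {
	struct {
		uint8_t  is_data   : 1;
		uint8_t  no_refcnt : 1;
		uint8_t  unused    : 6;
		uint8_t  padding;
		uint16_t prioidx;
		uint32_t classid;
	};
	uint64_t val;
};

int main(void)
{
	union skcd skcd = { .val = 0 };

	skcd.no_refcnt = 1;
	skcd.prioidx = 7;

	/* Either low bit set means "val is not a pointer", hence the v & 3 test. */
	printf("val & 3 = %llu\n", (unsigned long long)(skcd.val & 3));
	return 0;
}

On a typical little-endian GCC/Clang build this prints val & 3 = 2: the tag bits are visible without knowing which flag was set, which is all the pointer check needs.
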
a/include/linux/compiler.h b/include/linux/compiler.h index b1ddba09711d07fd805f5ef5f32630cef5414ceb..6d2ef5015afd271d58c1ab49dc32984d7e9588cc 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -382,4 +382,10 @@ unsigned long read_word_at_a_time(const void *addr) (_________p1); \ }) +/* + * This is needed in functions which generate the stack canary, see + * arch/x86/kernel/smpboot.c::start_secondary() for an example. + */ +#define prevent_tail_call_optimization() mb() + #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h index f236f5b931b2a0ef0021556bf1b41be09668062a..7fdd7f355b529c9b6f46967630a709e47fd8caaf 100644 --- a/include/linux/elfnote.h +++ b/include/linux/elfnote.h @@ -54,7 +54,7 @@ .popsection ; #define ELFNOTE(name, type, desc) \ - ELFNOTE_START(name, type, "") \ + ELFNOTE_START(name, type, "a") \ desc ; \ ELFNOTE_END diff --git a/include/linux/fs.h b/include/linux/fs.h index 0a4e61ac3e1fe9a6418212a0e18273a88bdfc8a1..ec7511166197fdfc96f5f61adc163b89ffd22f22 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -909,7 +909,7 @@ struct file_handle { __u32 handle_bytes; int handle_type; /* file identifier */ - unsigned char f_handle[0]; + unsigned char f_handle[]; }; static inline struct file *get_file(struct file *f) diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index db99db6e9458a0b79b19e967fd3f49141b9c8138..a83fbaffaac612d7d1229933a3cc323cfc272ad4 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -15,12 +15,15 @@ #include #include +#include #include #include #define FS_CRYPTO_BLOCK_SIZE 16 +union fscrypt_context; struct fscrypt_info; +struct seq_file; struct fscrypt_str { unsigned char *name; @@ -56,10 +59,12 @@ struct fscrypt_name { struct fscrypt_operations { unsigned int flags; const char *key_prefix; - int (*get_context)(struct inode *, void *, size_t); - int (*set_context)(struct inode *, const void *, size_t, void *); - bool (*dummy_context)(struct inode *); - bool (*empty_dir)(struct inode *); + int (*get_context)(struct inode *inode, void *ctx, size_t len); + int (*set_context)(struct inode *inode, const void *ctx, size_t len, + void *fs_data); + const union fscrypt_context *(*get_dummy_context)( + struct super_block *sb); + bool (*empty_dir)(struct inode *inode); unsigned int max_namelen; bool (*is_encrypted)(struct inode *); bool (*has_stable_inodes)(struct super_block *sb); @@ -80,6 +85,7 @@ static inline bool fscrypt_has_encryption_key(const struct inode *inode) /** * fscrypt_needs_contents_encryption() - check whether an inode needs * contents encryption + * @inode: the inode to check * * Return: %true iff the inode is an encrypted regular file and the kernel was * built with fscrypt support. 
@@ -92,10 +98,12 @@ static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); } -static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +static inline const union fscrypt_context * +fscrypt_get_dummy_context(struct super_block *sb) { - return inode->i_sb->s_cop->dummy_context && - inode->i_sb->s_cop->dummy_context(inode); + if (!sb->s_cop->get_dummy_context) + return NULL; + return sb->s_cop->get_dummy_context(sb); } /* @@ -111,22 +119,21 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry) } /* crypto.c */ -extern void fscrypt_enqueue_decrypt_work(struct work_struct *); - -extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, - unsigned int len, - unsigned int offs, - gfp_t gfp_flags); -extern int fscrypt_encrypt_block_inplace(const struct inode *inode, - struct page *page, unsigned int len, - unsigned int offs, u64 lblk_num, - gfp_t gfp_flags); - -extern int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len, - unsigned int offs); -extern int fscrypt_decrypt_block_inplace(const struct inode *inode, - struct page *page, unsigned int len, - unsigned int offs, u64 lblk_num); +void fscrypt_enqueue_decrypt_work(struct work_struct *); + +struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, + unsigned int len, + unsigned int offs, + gfp_t gfp_flags); +int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, + unsigned int len, unsigned int offs, + u64 lblk_num, gfp_t gfp_flags); + +int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len, + unsigned int offs); +int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page, + unsigned int len, unsigned int offs, + u64 lblk_num); static inline bool fscrypt_is_bounce_page(struct page *page) { @@ -138,81 +145,93 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) return (struct page *)page_private(bounce_page); } -extern void fscrypt_free_bounce_page(struct page *bounce_page); -extern int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags); +void fscrypt_free_bounce_page(struct page *bounce_page); +int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags); /* policy.c */ -extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); -extern int fscrypt_ioctl_get_policy(struct file *, void __user *); -extern int fscrypt_ioctl_get_policy_ex(struct file *, void __user *); -extern int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg); -extern int fscrypt_has_permitted_context(struct inode *, struct inode *); -extern int fscrypt_inherit_context(struct inode *, struct inode *, - void *, bool); +int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg); +int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg); +int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg); +int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg); +int fscrypt_has_permitted_context(struct inode *parent, struct inode *child); +int fscrypt_inherit_context(struct inode *parent, struct inode *child, + void *fs_data, bool preload); + +struct fscrypt_dummy_context { + const union fscrypt_context *ctx; +}; + +int fscrypt_set_test_dummy_encryption(struct super_block *sb, + const substring_t *arg, + struct fscrypt_dummy_context *dummy_ctx); +void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep, + struct super_block *sb); +static inline void 
+fscrypt_free_dummy_context(struct fscrypt_dummy_context *dummy_ctx) +{ + kfree(dummy_ctx->ctx); + dummy_ctx->ctx = NULL; +} + /* keyring.c */ -extern void fscrypt_sb_free(struct super_block *sb); -extern int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); -extern int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); -extern int fscrypt_ioctl_remove_key_all_users(struct file *filp, - void __user *arg); -extern int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg); -extern int fscrypt_register_key_removal_notifier(struct notifier_block *nb); -extern int fscrypt_unregister_key_removal_notifier(struct notifier_block *nb); +void fscrypt_sb_free(struct super_block *sb); +int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); +int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); +int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg); +int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg); +int fscrypt_register_key_removal_notifier(struct notifier_block *nb); +int fscrypt_unregister_key_removal_notifier(struct notifier_block *nb); /* keysetup.c */ -extern int fscrypt_get_encryption_info(struct inode *); -extern void fscrypt_put_encryption_info(struct inode *); -extern void fscrypt_free_inode(struct inode *); -extern int fscrypt_drop_inode(struct inode *inode); +int fscrypt_get_encryption_info(struct inode *inode); +void fscrypt_put_encryption_info(struct inode *inode); +void fscrypt_free_inode(struct inode *inode); +int fscrypt_drop_inode(struct inode *inode); /* fname.c */ -extern int fscrypt_setup_filename(struct inode *, const struct qstr *, - int lookup, struct fscrypt_name *); +int fscrypt_setup_filename(struct inode *inode, const struct qstr *iname, + int lookup, struct fscrypt_name *fname); static inline void fscrypt_free_filename(struct fscrypt_name *fname) { kfree(fname->crypto_buf.name); } -extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, - struct fscrypt_str *); -extern void fscrypt_fname_free_buffer(struct fscrypt_str *); -extern int fscrypt_fname_disk_to_usr(const struct inode *inode, - u32 hash, u32 minor_hash, - const struct fscrypt_str *iname, - struct fscrypt_str *oname); -extern bool fscrypt_match_name(const struct fscrypt_name *fname, - const u8 *de_name, u32 de_name_len); -extern u64 fscrypt_fname_siphash(const struct inode *dir, - const struct qstr *name); +int fscrypt_fname_alloc_buffer(const struct inode *inode, u32 max_encrypted_len, + struct fscrypt_str *crypto_str); +void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str); +int fscrypt_fname_disk_to_usr(const struct inode *inode, + u32 hash, u32 minor_hash, + const struct fscrypt_str *iname, + struct fscrypt_str *oname); +bool fscrypt_match_name(const struct fscrypt_name *fname, + const u8 *de_name, u32 de_name_len); +u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name); /* bio.c */ -extern void fscrypt_decrypt_bio(struct bio *); -extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, - unsigned int); +void fscrypt_decrypt_bio(struct bio *bio); +int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, + sector_t pblk, unsigned int len); /* hooks.c */ -extern int fscrypt_file_open(struct inode *inode, struct file *filp); -extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir, - struct dentry *dentry); -extern int __fscrypt_prepare_rename(struct inode *old_dir, - struct dentry *old_dentry, - struct inode *new_dir, - struct dentry 
*new_dentry, - unsigned int flags); -extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, - struct fscrypt_name *fname); -extern int fscrypt_prepare_setflags(struct inode *inode, - unsigned int oldflags, unsigned int flags); -extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, - unsigned int max_len, - struct fscrypt_str *disk_link); -extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, - unsigned int len, - struct fscrypt_str *disk_link); -extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, - unsigned int max_size, - struct delayed_call *done); +int fscrypt_file_open(struct inode *inode, struct file *filp); +int __fscrypt_prepare_link(struct inode *inode, struct inode *dir, + struct dentry *dentry); +int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags); +int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, + struct fscrypt_name *fname); +int fscrypt_prepare_setflags(struct inode *inode, + unsigned int oldflags, unsigned int flags); +int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link); +int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, + unsigned int len, struct fscrypt_str *disk_link); +const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, + unsigned int max_size, + struct delayed_call *done); #else /* !CONFIG_FS_ENCRYPTION */ static inline bool fscrypt_has_encryption_key(const struct inode *inode) @@ -225,9 +244,10 @@ static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) return false; } -static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +static inline const union fscrypt_context * +fscrypt_get_dummy_context(struct super_block *sb) { - return false; + return NULL; } static inline void fscrypt_handle_d_move(struct dentry *dentry) @@ -322,6 +342,20 @@ static inline int fscrypt_inherit_context(struct inode *parent, return -EOPNOTSUPP; } +struct fscrypt_dummy_context { +}; + +static inline void fscrypt_show_test_dummy_encryption(struct seq_file *seq, + char sep, + struct super_block *sb) +{ +} + +static inline void +fscrypt_free_dummy_context(struct fscrypt_dummy_context *dummy_ctx) +{ +} + /* keyring.c */ static inline void fscrypt_sb_free(struct super_block *sb) { @@ -600,7 +634,7 @@ fscrypt_inode_should_skip_dm_default_key(const struct inode *inode) #endif /** - * fscrypt_require_key - require an inode's encryption key + * fscrypt_require_key() - require an inode's encryption key * @inode: the inode we need the key for * * If the inode is encrypted, set up its encryption key if not already done. 
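Editor's aside: the fscrypt changes above replace the boolean ->dummy_context() hook with ->get_dummy_context() and introduce fscrypt_set_test_dummy_encryption(), fscrypt_show_test_dummy_encryption() and fscrypt_free_dummy_context(). The sketch below shows, assuming CONFIG_FS_ENCRYPTION is enabled, how a filesystem might wire these helpers into its mount-option handling; it is not part of the patch, and every "myfs_*" name and the sb_info layout are hypothetical.

#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/parser.h>
#include <linux/seq_file.h>

/* Hypothetical per-superblock info that stores the parsed dummy context. */
struct myfs_sb_info {
	struct fscrypt_dummy_context dummy_ctx;
};

/* Mount option "test_dummy_encryption[=v1|v2]": parse and stash it. */
static int myfs_handle_test_dummy_encryption(struct super_block *sb,
					     const substring_t *arg,
					     struct myfs_sb_info *sbi)
{
	return fscrypt_set_test_dummy_encryption(sb, arg, &sbi->dummy_ctx);
}

/* ->get_dummy_context() hook for struct fscrypt_operations. */
static const union fscrypt_context *
myfs_get_dummy_context(struct super_block *sb)
{
	struct myfs_sb_info *sbi = sb->s_fs_info;

	return sbi->dummy_ctx.ctx;
}

/* ->show_options(): emits ",test_dummy_encryption=..." when it is enabled. */
static int myfs_show_options(struct seq_file *seq, struct dentry *root)
{
	fscrypt_show_test_dummy_encryption(seq, ',', root->d_sb);
	return 0;
}

/* On unmount the parsed context would be released with
 * fscrypt_free_dummy_context(&sbi->dummy_ctx).
 */

The apparent intent, as this editor reads it, is to move previously per-filesystem boilerplate behind common helpers so that "-o test_dummy_encryption" behaves consistently across filesystems.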
@@ -626,7 +660,8 @@ static inline int fscrypt_require_key(struct inode *inode) } /** - * fscrypt_prepare_link - prepare to link an inode into a possibly-encrypted directory + * fscrypt_prepare_link() - prepare to link an inode into a possibly-encrypted + * directory * @old_dentry: an existing dentry for the inode being linked * @dir: the target directory * @dentry: negative dentry for the target filename @@ -653,7 +688,8 @@ static inline int fscrypt_prepare_link(struct dentry *old_dentry, } /** - * fscrypt_prepare_rename - prepare for a rename between possibly-encrypted directories + * fscrypt_prepare_rename() - prepare for a rename between possibly-encrypted + * directories * @old_dir: source directory * @old_dentry: dentry for source file * @new_dir: target directory @@ -686,7 +722,8 @@ static inline int fscrypt_prepare_rename(struct inode *old_dir, } /** - * fscrypt_prepare_lookup - prepare to lookup a name in a possibly-encrypted directory + * fscrypt_prepare_lookup() - prepare to lookup a name in a possibly-encrypted + * directory * @dir: directory being searched * @dentry: filename being looked up * @fname: (output) the name to use to search the on-disk directory @@ -720,7 +757,8 @@ static inline int fscrypt_prepare_lookup(struct inode *dir, } /** - * fscrypt_prepare_setattr - prepare to change a possibly-encrypted inode's attributes + * fscrypt_prepare_setattr() - prepare to change a possibly-encrypted inode's + * attributes * @dentry: dentry through which the inode is being changed * @attr: attributes to change * @@ -745,7 +783,7 @@ static inline int fscrypt_prepare_setattr(struct dentry *dentry, } /** - * fscrypt_prepare_symlink - prepare to create a possibly-encrypted symlink + * fscrypt_prepare_symlink() - prepare to create a possibly-encrypted symlink * @dir: directory in which the symlink is being created * @target: plaintext symlink target * @len: length of @target excluding null terminator @@ -773,7 +811,7 @@ static inline int fscrypt_prepare_symlink(struct inode *dir, unsigned int max_len, struct fscrypt_str *disk_link) { - if (IS_ENCRYPTED(dir) || fscrypt_dummy_context_enabled(dir)) + if (IS_ENCRYPTED(dir) || fscrypt_get_dummy_context(dir->i_sb) != NULL) return __fscrypt_prepare_symlink(dir, len, max_len, disk_link); disk_link->name = (unsigned char *)target; @@ -784,7 +822,7 @@ static inline int fscrypt_prepare_symlink(struct inode *dir, } /** - * fscrypt_encrypt_symlink - encrypt the symlink target if needed + * fscrypt_encrypt_symlink() - encrypt the symlink target if needed * @inode: symlink inode * @target: plaintext symlink target * @len: length of @target excluding null terminator diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h index ecc604e61d61b9ffcc96946904f5608e837168b5..78201a6d35f66d8b077f0d9faab6bc8be1315750 100644 --- a/include/linux/fsverity.h +++ b/include/linux/fsverity.h @@ -121,23 +121,23 @@ static inline struct fsverity_info *fsverity_get_info(const struct inode *inode) /* enable.c */ -extern int fsverity_ioctl_enable(struct file *filp, const void __user *arg); +int fsverity_ioctl_enable(struct file *filp, const void __user *arg); /* measure.c */ -extern int fsverity_ioctl_measure(struct file *filp, void __user *arg); +int fsverity_ioctl_measure(struct file *filp, void __user *arg); /* open.c */ -extern int fsverity_file_open(struct inode *inode, struct file *filp); -extern int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr); -extern void fsverity_cleanup_inode(struct inode *inode); +int fsverity_file_open(struct 
inode *inode, struct file *filp); +int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr); +void fsverity_cleanup_inode(struct inode *inode); /* verify.c */ -extern bool fsverity_verify_page(struct page *page); -extern void fsverity_verify_bio(struct bio *bio); -extern void fsverity_enqueue_verify_work(struct work_struct *work); +bool fsverity_verify_page(struct page *page); +void fsverity_verify_bio(struct bio *bio); +void fsverity_enqueue_verify_work(struct work_struct *work); #else /* !CONFIG_FS_VERITY */ @@ -200,6 +200,7 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work) /** * fsverity_active() - do reads from the inode need to go through fs-verity? + * @inode: inode to check * * This checks whether ->i_verity_info has been set. * @@ -207,6 +208,8 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work) * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to * a race condition where the file is being read concurrently with * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.) + * + * Return: true if reads need to go through fs-verity, otherwise false */ static inline bool fsverity_active(const struct inode *inode) { diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 19687c9bc4d2b93868c2d2d51b788ee11542cd1b..e489e8865dbeeaaa1b0d9bae1e4dea5fd25ad8f0 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -694,9 +694,11 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part) static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) { #if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP) + preempt_disable(); write_seqcount_begin(&part->nr_sects_seq); part->nr_sects = size; write_seqcount_end(&part->nr_sects_seq); + preempt_enable(); #elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT) preempt_disable(); part->nr_sects = size; diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 58df02bd93c9f04d0546a32de278503918d3e4f8..fa46183b163bca4ff8f37a6af07c52fb41028158 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -120,9 +120,12 @@ io_mapping_init_wc(struct io_mapping *iomap, resource_size_t base, unsigned long size) { + iomap->iomem = ioremap_wc(base, size); + if (!iomap->iomem) + return NULL; + iomap->base = base; iomap->size = size; - iomap->iomem = ioremap_wc(base, size); #if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */ iomap->prot = pgprot_noncached_wc(PAGE_KERNEL); #elif defined(pgprot_writecombine) diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index e465bb15912d98cd1136f985a110bf0347667bd1..6be5545d3584f9b69a5f2b46e9e6bd84ba1c5bef 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -317,7 +317,7 @@ extern void gdbstub_exit(int status); extern int kgdb_single_step; extern atomic_t kgdb_active; #define in_dbg_master() \ - (raw_smp_processor_id() == atomic_read(&kgdb_active)) + (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active))) extern bool dbg_is_early; extern void __init dbg_late_init(void); #else /* ! 
CONFIG_KGDB */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 520702b821340db484ccea1677e078d5f1a6e643..be7a49f437ea398304fc3d424b63e15af6ec8af7 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -385,6 +385,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) return this_cpu_ptr(&kprobe_ctlblk); } +extern struct kprobe kprobe_busy; +void kprobe_busy_begin(void); +void kprobe_busy_end(void); + kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset); int register_kprobe(struct kprobe *p); void unregister_kprobe(struct kprobe *p); diff --git a/include/linux/libata.h b/include/linux/libata.h index 93838d98e3f38cd6a675e38eb4259a251664a4da..5c9a44e3a02788f6e845918123b89dfdb68f11c6 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -38,6 +38,7 @@ #include #include #include +#include /* * Define if arch has non-standard setup. This is a _PCI_ standard @@ -884,6 +885,8 @@ struct ata_port { struct timer_list fastdrain_timer; unsigned long fastdrain_cnt; + async_cookie_t cookie; + int em_message_type; void *private_data; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 32d4453151285e5de99474b6f8a753c84d1349f3..983cd796cbb3a04fe6c7f9446978d8f434d82006 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -841,6 +841,7 @@ struct mlx5_cmd_work_ent { struct delayed_work cb_timeout_work; void *context; int idx; + struct completion handling; struct completion done; struct mlx5_cmd *cmd; struct work_struct work; diff --git a/include/linux/mm.h b/include/linux/mm.h index b1d9412e60a249153cbacda4e9e842aa602f7ee9..34236fbc50793be11318dfb4bcc8047e1a066d80 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -572,7 +572,13 @@ static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags) } extern void kvfree(const void *addr); +extern void kvfree_sensitive(const void *addr, size_t len); +/* + * Mapcount of compound page as a whole, does not include mapped sub-pages. + * + * Must be called only for compound pages or any their tail sub-pages. + */ static inline int compound_mapcount(struct page *page) { VM_BUG_ON_PAGE(!PageCompound(page), page); @@ -592,10 +598,16 @@ static inline void page_mapcount_reset(struct page *page) int __page_mapcount(struct page *page); +/* + * Mapcount of 0-order page; when compound sub-page, includes + * compound_mapcount(). + * + * Result is undefined for pages which cannot be mapped into userspace. + * For example SLAB or special types of pages. See function page_has_type(). + * They use this place in struct page differently. 
+ */ static inline int page_mapcount(struct page *page) { - VM_BUG_ON_PAGE(PageSlab(page), page); - if (unlikely(PageCompound(page))) return __page_mapcount(page); return atomic_read(&page->_mapcount) + 1; diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index aded5a8a733dfadd272fdbde7494256e9ac8b983..f95c016b18a7c356723e7a0c925fabf0927aab21 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -180,8 +180,25 @@ struct mmc_request { #endif int tag; +#ifdef CONFIG_MMC_CRYPTO + int crypto_key_slot; + u64 data_unit_num; + const struct blk_crypto_key *crypto_key; +#endif }; +#ifdef CONFIG_MMC_CRYPTO +static inline bool mmc_request_crypto_enabled(const struct mmc_request *mrq) +{ + return mrq->crypto_key != NULL; +} +#else +static inline bool mmc_request_crypto_enabled(const struct mmc_request *mrq) +{ + return false; +} +#endif + struct mmc_card; struct mmc_cmdq_req; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index ab2c6af4dca16d5d03b8d1810132162f7cdc8004..328e78d9281b49f8f9eb89cf67ac08d8c816fb01 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -509,6 +509,7 @@ struct mmc_host { u32 cached_caps2; #define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ +#define MMC_CAP2_CRYPTO (1 << 1) /* Host supports inline encryption */ #define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */ #define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */ #define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ @@ -671,6 +672,10 @@ struct mmc_host { int cqe_qdepth; bool cqe_enabled; bool cqe_on; +#ifdef CONFIG_MMC_CRYPTO + struct keyslot_manager *ksm; + void *crypto_DO_NOT_USE[7]; +#endif /* CONFIG_MMC_CRYPTO */ #ifdef CONFIG_MMC_EMBEDDED_SDIO struct { diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 129ceb61cfca80b4cc16e934e6845b13268be87f..9af6bc4d84930f259141bd3c04b2d4b78c0f55f2 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -619,6 +619,10 @@ struct mips_cdmm_device_id { /* * MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id. * Although gcc seems to ignore this error, clang fails without this define. + * + * Note: The ordering of the struct is different from upstream because the + * static initializers in kernels < 5.7 still use C89 style while upstream + * has been converted to proper C99 initializers. 
*/ #define x86cpu_device_id x86_cpu_id struct x86_cpu_id { @@ -627,6 +631,7 @@ struct x86_cpu_id { __u16 model; __u16 feature; /* bit index */ kernel_ulong_t driver_data; + __u16 steppings; }; #define X86_FEATURE_MATCH(x) \ @@ -635,6 +640,7 @@ struct x86_cpu_id { #define X86_VENDOR_ANY 0xffff #define X86_FAMILY_ANY 0 #define X86_MODEL_ANY 0 +#define X86_STEPPING_ANY 0 #define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */ /* diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 2b05f4273babda7bde2247c07bf90f9363a73e7e..e9a791f46eb6496a0e539ffd1635ba63f5424ee7 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -22,6 +22,7 @@ #include #include +struct nand_chip; struct mtd_info; struct nand_flash_dev; struct device_node; @@ -37,7 +38,7 @@ int nand_scan_ident(struct mtd_info *mtd, int max_chips, int nand_scan_tail(struct mtd_info *mtd); /* Unregister the MTD device and free resources held by the NAND device */ -void nand_release(struct mtd_info *mtd); +void nand_release(struct nand_chip *chip); /* Internal helper for board drivers which need to override command function */ void nand_wait_ready(struct mtd_info *mtd); @@ -227,9 +228,6 @@ enum nand_ecc_algo { #define NAND_CI_CELLTYPE_MSK 0x0C #define NAND_CI_CELLTYPE_SHIFT 2 -/* Keep gcc happy */ -struct nand_chip; - /* ONFI features */ #define ONFI_FEATURE_16_BIT_BUS (1 << 0) #define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a62f40f633a81e7f9f7e756ce7f3cb56ab84621d..789e84ee019f23237c6f0b38f010ad9af147db82 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2483,7 +2483,7 @@ void synchronize_net(void); int init_dummy_netdev(struct net_device *dev); DECLARE_PER_CPU(int, xmit_recursion); -#define XMIT_RECURSION_LIMIT 10 +#define XMIT_RECURSION_LIMIT 8 static inline int dev_recursion_level(void) { diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h index 833a5b2255ea4d84f324ed8f28f0e73bee9f9c82..ade993809ebc7f076a74bc02d4c4564417e60ce5 100644 --- a/include/linux/netfilter/nf_conntrack_pptp.h +++ b/include/linux/netfilter/nf_conntrack_pptp.h @@ -5,7 +5,7 @@ #include -extern const char *const pptp_msg_name[]; +const char *pptp_msg_name(u_int16_t msg); /* state of the control session */ enum pptp_ctrlsess_state { diff --git a/include/linux/padata.h b/include/linux/padata.h index 2f9c1f93b1ce4e9da4b8e490abfc2c6a786dbb53..d803397a28f70be39443e57816d546baffbda43a 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -37,6 +36,7 @@ * @list: List entry, to attach to the padata lists. * @pd: Pointer to the internal control structure. * @cb_cpu: Callback cpu for serializatioon. + * @cpu: Cpu for parallelization. * @seq_nr: Sequence number of the parallelized data object. * @info: Used to pass information from the parallel to the serial function. * @parallel: Parallel execution function. @@ -46,6 +46,7 @@ struct padata_priv { struct list_head list; struct parallel_data *pd; int cb_cpu; + int cpu; int info; void (*parallel)(struct padata_priv *padata); void (*serial)(struct padata_priv *padata); @@ -83,7 +84,6 @@ struct padata_serial_queue { * @serial: List to wait for serialization after reordering. * @pwork: work struct for parallelization. * @swork: work struct for serialization. - * @pd: Backpointer to the internal control structure. 
* @work: work struct for parallelization. * @num_obj: Number of objects that are processed by this cpu. * @cpu_index: Index of the cpu. @@ -91,7 +91,6 @@ struct padata_serial_queue { struct padata_parallel_queue { struct padata_list parallel; struct padata_list reorder; - struct parallel_data *pd; struct work_struct work; atomic_t num_obj; int cpu_index; @@ -118,10 +117,10 @@ struct padata_cpumask { * @reorder_objects: Number of objects waiting in the reorder queues. * @refcnt: Number of objects holding a reference on this parallel_data. * @max_seq_nr: Maximal used sequence number. + * @cpu: Next CPU to be processed. * @cpumask: The cpumasks in use for parallel and serial workers. + * @reorder_work: work struct for reordering. * @lock: Reorder lock. - * @processed: Number of already processed objects. - * @timer: Reorder timer. */ struct parallel_data { struct padata_instance *pinst; @@ -130,10 +129,10 @@ struct parallel_data { atomic_t reorder_objects; atomic_t refcnt; atomic_t seq_nr; + int cpu; struct padata_cpumask cpumask; + struct work_struct reorder_work; spinlock_t lock ____cacheline_aligned; - unsigned int processed; - struct timer_list timer; }; /** diff --git a/include/linux/parser.h b/include/linux/parser.h index 12fc3482f5fc7ae1135d2a7f9a10358d88a9bcf9..89e2b23fb888e82edacdac2293d3ce5b70de78e5 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h @@ -7,7 +7,8 @@ * but could potentially be used anywhere else that simple option=arg * parsing is required. */ - +#ifndef _LINUX_PARSER_H +#define _LINUX_PARSER_H /* associates an integer enumerator with a pattern string. */ struct match_token { @@ -34,3 +35,5 @@ int match_hex(substring_t *, int *result); bool match_wildcard(const char *pattern, const char *str); size_t match_strlcpy(char *, const substring_t *, size_t); char *match_strdup(const substring_t *); + +#endif /* _LINUX_PARSER_H */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index f93437e828835de85f741c979bc1f52b44943c38..b5f3d24ddb25c8b89f1a4c45074dc79722d81e76 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -45,6 +45,7 @@ #define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400 #define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401 #define PCI_CLASS_MULTIMEDIA_PHONE 0x0402 +#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 #define PCI_CLASS_MULTIMEDIA_OTHER 0x0480 #define PCI_BASE_CLASS_MEMORY 0x05 @@ -1333,6 +1334,7 @@ #define PCI_DEVICE_ID_IMS_TT3D 0x9135 #define PCI_VENDOR_ID_AMCC 0x10e8 +#define PCI_VENDOR_ID_AMPERE 0x1def #define PCI_VENDOR_ID_INTERG 0x10ea #define PCI_DEVICE_ID_INTERG_1682 0x1682 diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 3b12fd28af78d1142c9503d3908d6c39823184ce..fc4df3ccefc9607d8f5197021c85498b5e5c8d66 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -220,10 +220,8 @@ struct pnp_card { #define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list) #define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list) #define to_pnp_card(n) container_of(n, struct pnp_card, dev) -#define pnp_for_each_card(card) \ - for((card) = global_to_pnp_card(pnp_cards.next); \ - (card) != global_to_pnp_card(&pnp_cards); \ - (card) = global_to_pnp_card((card)->global_list.next)) +#define pnp_for_each_card(card) \ + list_for_each_entry(card, &pnp_cards, global_list) struct pnp_card_link { struct pnp_card *card; @@ -276,14 +274,9 @@ struct pnp_dev { #define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list) #define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list) #define 
to_pnp_dev(n) container_of(n, struct pnp_dev, dev) -#define pnp_for_each_dev(dev) \ - for((dev) = global_to_pnp_dev(pnp_global.next); \ - (dev) != global_to_pnp_dev(&pnp_global); \ - (dev) = global_to_pnp_dev((dev)->global_list.next)) -#define card_for_each_dev(card,dev) \ - for((dev) = card_to_pnp_dev((card)->devices.next); \ - (dev) != card_to_pnp_dev(&(card)->devices); \ - (dev) = card_to_pnp_dev((dev)->card_list.next)) +#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, global_list) +#define card_for_each_dev(card, dev) \ + list_for_each_entry(dev, &(card)->devices, card_list) #define pnp_dev_name(dev) (dev)->name static inline void *pnp_get_drvdata(struct pnp_dev *pdev) @@ -437,14 +430,10 @@ struct pnp_protocol { }; #define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list) -#define protocol_for_each_card(protocol,card) \ - for((card) = protocol_to_pnp_card((protocol)->cards.next); \ - (card) != protocol_to_pnp_card(&(protocol)->cards); \ - (card) = protocol_to_pnp_card((card)->protocol_list.next)) -#define protocol_for_each_dev(protocol,dev) \ - for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \ - (dev) != protocol_to_pnp_dev(&(protocol)->devices); \ - (dev) = protocol_to_pnp_dev((dev)->protocol_list.next)) +#define protocol_for_each_card(protocol, card) \ + list_for_each_entry(card, &(protocol)->cards, protocol_list) +#define protocol_for_each_dev(protocol, dev) \ + list_for_each_entry(dev, &(protocol)->devices, protocol_list) extern struct bus_type pnp_bus_type; diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 733fad7dfbed96c0c55f3d6b2f44ca5cede61a86..6d15040c642cb0ff12e4285f4c3691d62961e018 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -207,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) { + u16 elem_per_page = p_chain->elem_per_page; + u32 prod = p_chain->u.chain16.prod_idx; + u32 cons = p_chain->u.chain16.cons_idx; u16 used; - used = (u16) (((u32)0x10000 + - (u32)p_chain->u.chain16.prod_idx) - - (u32)p_chain->u.chain16.cons_idx); + if (prod < cons) + prod += (u32)U16_MAX + 1; + + used = (u16)(prod - cons); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page - - p_chain->u.chain16.cons_idx / p_chain->elem_per_page; + used -= prod / elem_per_page - cons / elem_per_page; return (u16)(p_chain->capacity - used); } static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) { + u16 elem_per_page = p_chain->elem_per_page; + u64 prod = p_chain->u.chain32.prod_idx; + u64 cons = p_chain->u.chain32.cons_idx; u32 used; - used = (u32) (((u64)0x100000000ULL + - (u64)p_chain->u.chain32.prod_idx) - - (u64)p_chain->u.chain32.cons_idx); + if (prod < cons) + prod += (u64)U32_MAX + 1; + + used = (u32)(prod - cons); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page - - p_chain->u.chain32.cons_idx / p_chain->elem_per_page; + used -= (u32)(prod / elem_per_page - cons / elem_per_page); return p_chain->capacity - used; } diff --git a/include/linux/string.h b/include/linux/string.h index 3d43329c20bef14019f6e3fc6d3c2cda87ce1abe..315fef3aff4e683768fd62c47a0fd8b436e14c1b 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -238,6 +238,31 @@ void __read_overflow3(void) __compiletime_error("detected read beyond size of ob void __write_overflow(void) 
__compiletime_error("detected write beyond size of object passed as 1st parameter"); #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) + +#ifdef CONFIG_KASAN +extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); +extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); +extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); +extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove); +extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset); +extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat); +extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy); +extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen); +extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat); +extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy); +#else +#define __underlying_memchr __builtin_memchr +#define __underlying_memcmp __builtin_memcmp +#define __underlying_memcpy __builtin_memcpy +#define __underlying_memmove __builtin_memmove +#define __underlying_memset __builtin_memset +#define __underlying_strcat __builtin_strcat +#define __underlying_strcpy __builtin_strcpy +#define __underlying_strlen __builtin_strlen +#define __underlying_strncat __builtin_strncat +#define __underlying_strncpy __builtin_strncpy +#endif + __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); @@ -245,14 +270,14 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) __write_overflow(); if (p_size < size) fortify_panic(__func__); - return __builtin_strncpy(p, q, size); + return __underlying_strncpy(p, q, size); } __FORTIFY_INLINE char *strcat(char *p, const char *q) { size_t p_size = __builtin_object_size(p, 0); if (p_size == (size_t)-1) - return __builtin_strcat(p, q); + return __underlying_strcat(p, q); if (strlcat(p, q, p_size) >= p_size) fortify_panic(__func__); return p; @@ -266,7 +291,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) /* Work around gcc excess stack consumption issue */ if (p_size == (size_t)-1 || (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) - return __builtin_strlen(p); + return __underlying_strlen(p); ret = strnlen(p, p_size); if (p_size <= ret) fortify_panic(__func__); @@ -299,7 +324,7 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) __write_overflow(); if (len >= p_size) fortify_panic(__func__); - __builtin_memcpy(p, q, len); + __underlying_memcpy(p, q, len); p[len] = '\0'; } return ret; @@ -312,12 +337,12 @@ __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) size_t p_size = __builtin_object_size(p, 0); size_t q_size = __builtin_object_size(q, 0); if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __builtin_strncat(p, q, count); + return __underlying_strncat(p, q, count); p_len = strlen(p); copy_len = strnlen(q, count); if (p_size < p_len + copy_len + 1) fortify_panic(__func__); - __builtin_memcpy(p + p_len, q, copy_len); + __underlying_memcpy(p + p_len, q, copy_len); p[p_len + copy_len] = '\0'; return p; } @@ -329,7 +354,7 @@ __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) __write_overflow(); if (p_size < size) fortify_panic(__func__); - 
return __builtin_memset(p, c, size); + return __underlying_memset(p, c, size); } __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) @@ -344,7 +369,7 @@ __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memcpy(p, q, size); + return __underlying_memcpy(p, q, size); } __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) @@ -359,7 +384,7 @@ __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memmove(p, q, size); + return __underlying_memmove(p, q, size); } extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); @@ -385,7 +410,7 @@ __FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memcmp(p, q, size); + return __underlying_memcmp(p, q, size); } __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) @@ -395,7 +420,7 @@ __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) __read_overflow(); if (p_size < size) fortify_panic(__func__); - return __builtin_memchr(p, c, size); + return __underlying_memchr(p, c, size); } void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); @@ -426,11 +451,22 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q) size_t p_size = __builtin_object_size(p, 0); size_t q_size = __builtin_object_size(q, 0); if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __builtin_strcpy(p, q); + return __underlying_strcpy(p, q); memcpy(p, q, strlen(q) + 1); return p; } +/* Don't use these outside the FORTIFY_SOURCE implementation */ +#undef __underlying_memchr +#undef __underlying_memcmp +#undef __underlying_memcpy +#undef __underlying_memmove +#undef __underlying_memset +#undef __underlying_strcat +#undef __underlying_strcpy +#undef __underlying_strlen +#undef __underlying_strncat +#undef __underlying_strncpy #endif /** diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index 5ac5db4d295f9487ced510634c2b2b65c5f7d20b..566d5f547567e2818cb260d8f039ed7929ba4be0 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h @@ -83,6 +83,7 @@ struct pf_desc { u32 service; char *name; char *auth_domain_name; + struct auth_domain *domain; bool datatouch; }; diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h index a4528b26c8aad78e523c8aa327054cf9c22ccee3..d229d27ab19e3154c45813b2126f0b274433f484 100644 --- a/include/linux/sunrpc/svcauth_gss.h +++ b/include/linux/sunrpc/svcauth_gss.h @@ -21,7 +21,8 @@ int gss_svc_init(void); void gss_svc_shutdown(void); int gss_svc_init_net(struct net *net); void gss_svc_shutdown_net(struct net *net); -int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); +struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor, + char *name); u32 svcauth_gss_flavor(struct auth_domain *dom); #endif /* __KERNEL__ */ diff --git a/include/linux/timer.h b/include/linux/timer.h index 83b405f6b1b9504634bcb7984e898ebe063b98f8..1b85e41d67eaca7c0443c8088847d1f928f31156 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -21,6 +21,9 @@ struct timer_list { unsigned long data; u32 flags; +#ifdef CONFIG_CFI_CLANG + void (*__function)(struct timer_list *); +#endif #ifdef CONFIG_LOCKDEP struct lockdep_map 
lockdep_map; #endif @@ -172,6 +175,30 @@ static inline void init_timer_on_stack_key(struct timer_list *timer, #define TIMER_DATA_TYPE unsigned long #define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) +#ifdef CONFIG_CFI_CLANG +/* + * With CFI_CLANG, we cannot cast the callback function to TIMER_FUNC_TYPE + * without tripping an indirect call check in call_timer_fn. Therefore, we + * add a new field to struct timer_list and use __timer_callback to perform + * the indirect call using the correct function pointer. + */ +static inline void __timer_callback(unsigned long data) +{ + struct timer_list *timer = (struct timer_list *)data; + + timer->__function(timer); +} + +static inline void timer_setup(struct timer_list *timer, + void (*callback)(struct timer_list *), + unsigned int flags) +{ + timer->__function = callback; + + __setup_timer(timer, __timer_callback, + (TIMER_DATA_TYPE)timer, flags); +} +#else static inline void timer_setup(struct timer_list *timer, void (*callback)(struct timer_list *), unsigned int flags) @@ -179,6 +206,7 @@ static inline void timer_setup(struct timer_list *timer, __setup_timer(timer, (TIMER_FUNC_TYPE)callback, (TIMER_DATA_TYPE)timer, flags); } +#endif #define from_timer(var, callback_timer, timer_fieldname) \ container_of(callback_timer, typeof(*var), timer_fieldname) diff --git a/include/linux/tty.h b/include/linux/tty.h index 3e9c74197a8ffbea1ca39e70ff980420484d208e..470c8c0c6a436d32b80fd7f53ca9db18c108b62b 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -65,7 +65,7 @@ struct tty_buffer { int read; int flags; /* Data points here */ - unsigned long data[0]; + unsigned long data[]; }; /* Values for .flags field of tty_buffer */ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index f74c846603d873e91e8a7da1642c5cf1d070aa55..68445c751c032ee3cf85f775238f4fc252c48aa1 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -267,7 +267,7 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); probe_kernel_read(&retval, addr, sizeof(retval)) #ifndef user_access_begin -#define user_access_begin() do { } while (0) +#define user_access_begin(type, ptr, len) access_ok(type, ptr, len) #define user_access_end() do { } while (0) #define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0) #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 6f4942c5e4c3661f67554dc0cf1dc284aad725e3..218520253ff280bcc4fcf19b25c72a7483925209 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -270,6 +270,9 @@ int usb_func_wakeup(struct usb_function *func); int usb_get_func_interface_id(struct usb_function *func); +int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f, + struct usb_ep *_ep, u8 alt); + int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, struct usb_ep *_ep); diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index 960bedbdec87b3e4586f487d57223dccc8b8a4fd..77f0f0af3a7105c2e6e400be65cb23da3d1a2019 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -168,11 +168,8 @@ int vga_switcheroo_process_delayed_switch(void); bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev); enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev); -void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum 
vga_switcheroo_state dynamic); - int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain); void vga_switcheroo_fini_domain_pm_ops(struct device *dev); -int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain); #else static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} @@ -192,11 +189,8 @@ static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; } static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } -static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} - static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {} -static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; } #endif #endif /* _LINUX_VGA_SWITCHEROO_H_ */ diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index e0348cb0a1dd7d2f3320e58a7ec1cc76e4a8799b..d19bfdcf77498d8a8a893153bf3bcedfe4f71b5e 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -3,6 +3,8 @@ #define _LINUX_VIRTIO_NET_H #include +#include +#include #include static inline int virtio_net_hdr_set_proto(struct sk_buff *skb, @@ -28,17 +30,26 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, bool little_endian) { unsigned int gso_type = 0; + unsigned int thlen = 0; + unsigned int p_off = 0; + unsigned int ip_proto; if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: gso_type = SKB_GSO_TCPV4; + ip_proto = IPPROTO_TCP; + thlen = sizeof(struct tcphdr); break; case VIRTIO_NET_HDR_GSO_TCPV6: gso_type = SKB_GSO_TCPV6; + ip_proto = IPPROTO_TCP; + thlen = sizeof(struct tcphdr); break; case VIRTIO_NET_HDR_GSO_UDP: gso_type = SKB_GSO_UDP; + ip_proto = IPPROTO_UDP; + thlen = sizeof(struct udphdr); break; default: return -EINVAL; @@ -57,16 +68,21 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, if (!skb_partial_csum_set(skb, start, off)) return -EINVAL; + + p_off = skb_transport_offset(skb) + thlen; + if (p_off > skb_headlen(skb)) + return -EINVAL; } else { /* gso packets without NEEDS_CSUM do not set transport_offset. * probe and drop if does not match one of the above types. 
*/ if (gso_type && skb->network_header) { + struct flow_keys keys; + if (!skb->protocol) virtio_net_hdr_set_proto(skb, hdr); retry: - skb_probe_transport_header(skb, -1); - if (!skb_transport_header_was_set(skb)) { + if (!skb_flow_dissect_flow_keys(skb, &keys, 0)) { /* UFO does not specify ipv4 or 6: try both */ if (gso_type & SKB_GSO_UDP && skb->protocol == htons(ETH_P_IP)) { @@ -75,18 +91,33 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, } return -EINVAL; } + + p_off = keys.control.thoff + thlen; + if (p_off > skb_headlen(skb) || + keys.basic.ip_proto != ip_proto) + return -EINVAL; + + skb_set_transport_header(skb, keys.control.thoff); + } else if (gso_type) { + p_off = thlen; + if (p_off > skb_headlen(skb)) + return -EINVAL; } } if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); + struct skb_shared_info *shinfo = skb_shinfo(skb); - skb_shinfo(skb)->gso_size = gso_size; - skb_shinfo(skb)->gso_type = gso_type; + /* Too small packets are not really GSO ones. */ + if (skb->len - p_off > gso_size) { + shinfo->gso_size = gso_size; + shinfo->gso_type = gso_type; - /* Header must be checked, and gso_segs computed. */ - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; - skb_shinfo(skb)->gso_segs = 0; + /* Header must be checked, and gso_segs computed. */ + shinfo->gso_type |= SKB_GSO_DODGY; + shinfo->gso_segs = 0; + } } return 0; diff --git a/include/net/act_api.h b/include/net/act_api.h index 775387d6ca950a04baea65e757f3c0810b758068..ff268bb0c60fea43025a6b0b666c983780cbd463 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -69,7 +69,8 @@ static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm) { dtm->install = jiffies_to_clock_t(jiffies - stm->install); dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse); - dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse); + dtm->firstuse = stm->firstuse ? 
+ jiffies_to_clock_t(jiffies - stm->firstuse) : 0; dtm->expires = jiffies_to_clock_t(stm->expires); } diff --git a/include/net/addrconf.h b/include/net/addrconf.h index bcd9b88bc4e8c58c4f48e3acfa6a410ca34c5716..4e5316a8fbf239d89b1b54dd03100fb65222fa25 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -223,8 +223,10 @@ struct ipv6_stub { const struct in6_addr *addr); int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex, const struct in6_addr *addr); - int (*ipv6_dst_lookup)(struct net *net, struct sock *sk, - struct dst_entry **dst, struct flowi6 *fl6); + struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net, + const struct sock *sk, + struct flowi6 *fl6, + const struct in6_addr *final_dst); void (*udpv6_encap_enable)(void); void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr, const struct in6_addr *solicited_addr, diff --git a/include/net/cnss2.h b/include/net/cnss2.h index da12173f1b2338462f1c7705280b61c721efee5a..95e872cc8b015474edc74d88dc0b4fdf3370ec34 100644 --- a/include/net/cnss2.h +++ b/include/net/cnss2.h @@ -95,6 +95,7 @@ struct cnss_wlan_driver { void (*update_status)(struct pci_dev *pdev, uint32_t status); struct cnss_wlan_runtime_ops *runtime_ops; const struct pci_device_id *id_table; + enum cnss_driver_mode (*get_driver_mode)(void); }; struct cnss_usb_wlan_driver { @@ -208,6 +209,7 @@ extern int cnss_get_platform_cap(struct device *dev, extern struct dma_iommu_mapping *cnss_smmu_get_mapping(struct device *dev); extern int cnss_smmu_map(struct device *dev, phys_addr_t paddr, uint32_t *iova_addr, size_t size); +extern int cnss_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size); extern int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info); extern int cnss_request_bus_bandwidth(struct device *dev, int bandwidth); extern int cnss_power_up(struct device *dev); diff --git a/include/net/dst.h b/include/net/dst.h index 8c714eb6e18b5438a4b869b8c92bc8ed3838d1fd..3a05256c484d2cafb18ad669b35e469b59890555 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -427,7 +427,15 @@ static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, co static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, struct sk_buff *skb) { - struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL); + struct neighbour *n = NULL; + + /* The packets from tunnel devices (eg bareudp) may have only + * metadata in the dst pointer of skb. Hence a pointer check of + * neigh_lookup is needed. + */ + if (dst->ops->neigh_lookup) + n = dst->ops->neigh_lookup(dst, skb, NULL); + return IS_ERR(n) ? 
NULL : n; } diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 5ea1bb50bb40a78fcc98f0e89626a36b01c4c53b..c530b009d3c37da96fdf9ad6e2148553d3bfa4e6 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -864,7 +864,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk) int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); -struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, +struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst); struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst); diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 62ab1b0d98bb8f26e98ba0d12d4f270eef583642..1a587cdbfb209e85c14092a500bca094fbfa9279 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -94,7 +94,7 @@ struct nf_conn { struct hlist_node nat_bysource; #endif /* all members below initialized via memset */ - u8 __nfct_init_offset[0]; + struct { } __nfct_init_offset; /* If we were expected by an expectation, this will be it */ struct nf_conn *master; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index e268c970ec54b632d309ed0d449f0558965c0bf1..fefc0b7158270e076103e85ae908562194e858ac 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -128,6 +128,7 @@ struct netns_ipv4 { int sysctl_tcp_sack; int sysctl_tcp_window_scaling; int sysctl_tcp_timestamps; + int sysctl_tcp_default_init_rwnd; struct inet_timewait_death_row tcp_death_row; int sysctl_max_syn_backlog; diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index deaafa9b09cbea18a438070c0579ababf73dcfba..d4da07048aa3e4c9d3713d72e73cfc14772f8ae2 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -361,11 +361,13 @@ enum { ipv4_is_anycast_6to4(a)) /* Flags used for the bind address copy functions. */ -#define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by +#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by local sock family */ -#define SCTP_ADDR4_PEERSUPP 0x00000002 /* IPv4 address is supported by +#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by + local sock family */ +#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by peer */ -#define SCTP_ADDR6_PEERSUPP 0x00000004 /* IPv6 address is supported by +#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by peer */ /* Reasons to retransmit. 
*/ diff --git a/include/net/sock.h b/include/net/sock.h index d2454cf4c223a8d6d6d350926ab179e7536ecf00..ebf247618722aad227112c377f78d44412760bde 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1688,7 +1688,6 @@ static inline int sk_tx_queue_get(const struct sock *sk) static inline void sk_set_socket(struct sock *sk, struct socket *sock) { - sk_tx_queue_clear(sk); sk->sk_socket = sock; } diff --git a/include/net/tcp.h b/include/net/tcp.h index 2392c6544696ca25a420fd2aab9f66bb356b8425..ea72414ab52ed93ca0a44476de1ee5a6b7a5e3fe 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -280,7 +280,6 @@ extern int sysctl_tcp_autocorking; extern int sysctl_tcp_invalid_ratelimit; extern int sysctl_tcp_pacing_ss_ratio; extern int sysctl_tcp_pacing_ca_ratio; -extern int sysctl_tcp_default_init_rwnd; extern atomic_long_t tcp_memory_allocated; @@ -1332,7 +1331,7 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt) rx_opt->num_sacks = 0; } -u32 tcp_default_init_rwnd(u32 mss); +u32 tcp_default_init_rwnd(struct net *net, u32 mss); void tcp_cwnd_restart(struct sock *sk, s32 delta); static inline void tcp_slow_start_after_idle_check(struct sock *sk) @@ -1350,7 +1349,8 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk) } /* Determine a window scaling and initial window to offer. */ -void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd, +void tcp_select_initial_window(struct net *net, + int __space, __u32 mss, __u32 *rcv_wnd, __u32 *window_clamp, int wscale_ok, __u8 *rcv_wscale, __u32 init_rcv_wnd); diff --git a/include/soc/qcom/qseecomi.h b/include/soc/qcom/qseecomi.h index ee2270f89b8138f07baea31e83293a0ef5989cc1..b011efb8600f23dbdd4f5208f97de5784716ba60 100644 --- a/include/soc/qcom/qseecomi.h +++ b/include/soc/qcom/qseecomi.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -69,6 +69,7 @@ enum qseecom_qceos_cmd_id { QSEOS_TEE_OPEN_SESSION_WHITELIST = 0x1D, QSEOS_TEE_INVOKE_COMMAND_WHITELIST = 0x1E, QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST = 0x1F, + QSEOS_SOTA_NOTIFICATION_CHECK_STATUS = 0x20, QSEOS_FSM_LTEOTA_REQ_CMD = 0x109, QSEOS_FSM_LTEOTA_REQ_RSP_CMD = 0x110, QSEOS_FSM_IKE_REQ_CMD = 0x203, @@ -349,6 +350,9 @@ struct qseecom_continue_blocked_request_ireq { #define TZ_SVC_ES 16 /* Enterprise Security */ #define TZ_SVC_MDTP 18 /* Mobile Device Theft */ +/** SIP service call groups */ +#define TZ_SVC_FUSE 8 /* Fuse services. 
*/ + /*---------------------------------------------------------------------------- * Owning Entity IDs (defined by ARM SMC doc) * --------------------------------------------------------------------------- @@ -587,6 +591,12 @@ struct qseecom_continue_blocked_request_ireq { #define TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID \ TZ_SYSCALL_CREATE_PARAM_ID_0 +#define TZ_SOTA_UPDATE_NOTIFICATION_ID \ + TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_FUSE, 0x07) + +#define TZ_SOTA_UPDATE_NOTIFICATION_ID_PARAM_ID \ + TZ_SYSCALL_CREATE_PARAM_ID_0 + #define TZ_OS_KS_GEN_KEY_ID \ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x01) @@ -728,4 +738,6 @@ struct qseecom_continue_blocked_request_ireq { TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL) + + #endif /* __QSEECOMI_H_ */ diff --git a/include/soc/qcom/sb_notification.h b/include/soc/qcom/sb_notification.h index 20bbe73344192ac6a866eef68750fe4f1c0a6716..93b79ccb86cc1edc1a2a53e0055dbd6143155577 100644 --- a/include/soc/qcom/sb_notification.h +++ b/include/soc/qcom/sb_notification.h @@ -16,7 +16,7 @@ #define _SB_NOTIFICATION_H /* Indicates a system wake up event */ -#define EVT_WAKE_UP 0x01 +#define EVENT_REQUEST_WAKE_UP 0x01 #ifdef CONFIG_QTI_NOTIFY_SIDEBAND /** diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index ed8e7780aa0cc7c481a458b4321ed447ba7031c6..ca0fa6193353cf1d6432bd90e273ccc94036973c 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h @@ -72,6 +72,7 @@ struct snd_compr_runtime { * @direction: stream direction, playback/recording * @metadata_set: metadata set flag, true when set * @next_track: has userspace signal next track transition, true when set + * @partial_drain: undergoing partial_drain for stream, true when set * @private_data: pointer to DSP private data */ struct snd_compr_stream { @@ -83,6 +84,7 @@ struct snd_compr_stream { enum snd_compr_direction direction; bool metadata_set; bool next_track; + bool partial_drain; void *private_data; struct snd_soc_pcm_runtime *be; }; @@ -192,7 +194,13 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream) if (snd_BUG_ON(!stream)) return; - stream->runtime->state = SNDRV_PCM_STATE_SETUP; + /* for partial_drain case we are back to running state on success */ + if (stream->partial_drain) { + stream->runtime->state = SNDRV_PCM_STATE_RUNNING; + stream->partial_drain = false; /* clear this flag as well */ + } else { + stream->runtime->state = SNDRV_PCM_STATE_SETUP; + } wake_up(&stream->runtime->sleep); } diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 926ea701cdc4266542ab4ee6408834b1c3d236cd..5d0bf1688eba1eb17ead5f11301d42571d4ea6c0 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -228,9 +228,6 @@ struct hdac_io_ops { #define HDA_UNSOL_QUEUE_SIZE 64 #define HDA_MAX_CODECS 8 /* limit by controller side */ -/* HD Audio class code */ -#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 - /* * CORB/RIRB * diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index f54a533ecc8dd4655c8b9b5498edc8be37d3a154..17f2f6ed8defdc6570d4424403b6ff20f8be8b89 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -76,6 +76,7 @@ struct snd_rawmidi_runtime { size_t avail_min; /* min avail for wakeup */ size_t avail; /* max used buffer for wakeup */ size_t xruns; /* over/underruns counter */ + int buffer_ref; /* buffer reference count */ /* misc */ spinlock_t lock; struct mutex realloc_mutex; diff 
--git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 5b95f24125ed5131ff555b832e4bc69a2985243b..e6e695510ad15eb488b30c88f28ff40b48d85ba5 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -50,6 +50,7 @@ TRACE_DEFINE_ENUM(CP_RECOVERY); TRACE_DEFINE_ENUM(CP_DISCARD); TRACE_DEFINE_ENUM(CP_TRIMMED); TRACE_DEFINE_ENUM(CP_PAUSE); +TRACE_DEFINE_ENUM(CP_RESIZE); #define show_block_type(type) \ __print_symbolic(type, \ @@ -136,7 +137,8 @@ TRACE_DEFINE_ENUM(CP_PAUSE); { CP_RECOVERY, "Recovery" }, \ { CP_DISCARD, "Discard" }, \ { CP_PAUSE, "Pause" }, \ - { CP_TRIMMED, "Trimmed" }) + { CP_TRIMMED, "Trimmed" }, \ + { CP_RESIZE, "Resize" }) #define show_fsync_cpreason(type) \ __print_symbolic(type, \ @@ -1847,6 +1849,8 @@ TRACE_EVENT(f2fs_iostat, __field(unsigned long long, app_rio) __field(unsigned long long, app_mrio) __field(unsigned long long, fs_drio) + __field(unsigned long long, fs_gdrio) + __field(unsigned long long, fs_cdrio) __field(unsigned long long, fs_nrio) __field(unsigned long long, fs_mrio) __field(unsigned long long, fs_discard) @@ -1871,6 +1875,8 @@ TRACE_EVENT(f2fs_iostat, __entry->app_rio = iostat[APP_READ_IO]; __entry->app_mrio = iostat[APP_MAPPED_READ_IO]; __entry->fs_drio = iostat[FS_DATA_READ_IO]; + __entry->fs_gdrio = iostat[FS_GDATA_READ_IO]; + __entry->fs_cdrio = iostat[FS_CDATA_READ_IO]; __entry->fs_nrio = iostat[FS_NODE_READ_IO]; __entry->fs_mrio = iostat[FS_META_READ_IO]; __entry->fs_discard = iostat[FS_DISCARD]; @@ -1882,15 +1888,16 @@ TRACE_EVENT(f2fs_iostat, "gc [data=%llu, node=%llu], " "cp [data=%llu, node=%llu, meta=%llu], " "app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], " - "fs [data=%llu, node=%llu, meta=%llu]", + "fs [data=%llu, (gc_data=%llu, compr_data=%llu), " + "node=%llu, meta=%llu]", show_dev(__entry->dev), __entry->app_wio, __entry->app_dio, __entry->app_bio, __entry->app_mio, __entry->fs_dio, __entry->fs_nio, __entry->fs_mio, __entry->fs_discard, __entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio, __entry->fs_cp_nio, __entry->fs_cp_mio, __entry->app_rio, __entry->app_drio, __entry->app_brio, - __entry->app_mrio, __entry->fs_drio, __entry->fs_nrio, - __entry->fs_mrio) + __entry->app_mrio, __entry->fs_drio, __entry->fs_gdrio, + __entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio) ); #endif /* _TRACE_F2FS_H */ diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index a58caf5807ffc5ae7180e9c0c7ab41dc1f4404d4..de3a1effbcbd5626ffc6560a83d6397ba1226b1e 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -333,7 +333,7 @@ enum rxrpc_congest_change { EM(rxrpc_cong_begin_retransmission, " Retrans") \ EM(rxrpc_cong_cleared_nacks, " Cleared") \ EM(rxrpc_cong_new_low_nack, " NewLowN") \ - EM(rxrpc_cong_no_change, "") \ + EM(rxrpc_cong_no_change, " -") \ EM(rxrpc_cong_progress, " Progres") \ EM(rxrpc_cong_retransmit_again, " ReTxAgn") \ EM(rxrpc_cong_rtt_window_end, " RttWinE") \ diff --git a/include/uapi/linux/bgcom_interface.h b/include/uapi/linux/bgcom_interface.h index 76632a9ef2ee6408729cd60a156067a7ab2336e7..dda0ecd0ec69dfcfb6a27fae076de5417362e554 100644 --- a/include/uapi/linux/bgcom_interface.h +++ b/include/uapi/linux/bgcom_interface.h @@ -17,6 +17,8 @@ #define BGCOM_BG_WEAR_LOAD 11 #define BGCOM_BG_WEAR_TWM_LOAD 12 #define BGCOM_BG_WEAR_UNLOAD 13 +#define BGCOM_BG_FETCH_TWM_DATA 14 +#define BGCOM_BG_READ_TWM_DATA 15 #define EXCHANGE_CODE 'V' struct bg_ui_data { @@ -25,6 +27,7 @@ struct bg_ui_data { __u32 bg_address; __u32 cmd; __u32 
num_of_words; + __u8 __user *buffer; }; enum bg_event_type { @@ -81,4 +84,10 @@ enum bg_event_type { #define BG_ADSP_DOWN2_BG_DONE \ _IOWR(EXCHANGE_CODE, BGCOM_ADSP_DOWN2_BG, \ struct bg_ui_data) +#define BG_FETCH_TWM_DATA \ + _IOWR(EXCHANGE_CODE, BGCOM_BG_FETCH_TWM_DATA, \ + struct bg_ui_data) +#define BG_READ_TWM_DATA \ + _IOWR(EXCHANGE_CODE, BGCOM_BG_READ_TWM_DATA, \ + struct bg_ui_data) #endif diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 385c141d54338ccb7f213b39dbfb37f8a81faac9..79d1f51d70deaab7bd308b7f8ca06ab36cba2b9a 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -805,6 +805,7 @@ #define SW_HPHR_OVERCURRENT 0x11 /* set = over current on right hph */ #define SW_MICROPHONE2_INSERT 0x12 /* set = inserted */ #define SW_UNSUPPORT_INSERT 0x13 /* set = unsupported device inserted */ +#define SW_MACHINE_COVER 0x14 /* set = cover closed */ #define SW_MAX 0x20 #define SW_CNT (SW_MAX+1) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 27c62abb6c9ef0b69226401dac60229932e95cde..efe8873943f669f7ce56befe4e9b995388d3e832 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -189,9 +189,11 @@ struct kvm_hyperv_exit { #define KVM_EXIT_HYPERV_SYNIC 1 #define KVM_EXIT_HYPERV_HCALL 2 __u32 type; + __u32 pad1; union { struct { __u32 msr; + __u32 pad2; __u64 control; __u64 evt_page; __u64 msg_page; diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h index 7f1eccf62e60eaa8e1e22728c83f7d661df020da..6329fc62d6e291abb50d2387be5bd7438cfe2769 100644 --- a/include/uapi/linux/mmc/ioctl.h +++ b/include/uapi/linux/mmc/ioctl.h @@ -3,6 +3,7 @@ #define LINUX_MMC_IOCTL_H #include +#include struct mmc_ioc_cmd { /* Implies direction of data. true = write, false = read */ diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index b0d15c73f6d758db858b9ca0489d3e4978b01323..1f2d8c81f0e0cede75dbf3fc03e3c49558d16f26 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -329,6 +329,7 @@ struct mdp_superblock_1 { #define MD_FEATURE_JOURNAL 512 /* support write cache */ #define MD_FEATURE_PPL 1024 /* support PPL */ #define MD_FEATURE_MULTIPLE_PPLS 2048 /* support for multiple PPLs */ +#define MD_FEATURE_RAID0_LAYOUT 4096 /* layout is meaningful for RAID0 */ #define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \ |MD_FEATURE_RECOVERY_OFFSET \ |MD_FEATURE_RESHAPE_ACTIVE \ @@ -341,6 +342,7 @@ struct mdp_superblock_1 { |MD_FEATURE_JOURNAL \ |MD_FEATURE_PPL \ |MD_FEATURE_MULTIPLE_PPLS \ + |MD_FEATURE_RAID0_LAYOUT \ ) struct r5l_payload_header { diff --git a/include/uapi/linux/usb/raw_gadget.h b/include/uapi/linux/usb/raw_gadget.h index ea375082b3ac7ab9fa6ea6919ce35cb0ff9a6588..0be685272eb180074999aaf13bfdebd302903765 100644 --- a/include/uapi/linux/usb/raw_gadget.h +++ b/include/uapi/linux/usb/raw_gadget.h @@ -93,6 +93,64 @@ struct usb_raw_ep_io { __u8 data[0]; }; +/* Maximum number of non-control endpoints in struct usb_raw_eps_info. */ +#define USB_RAW_EPS_NUM_MAX 30 + +/* Maximum length of UDC endpoint name in struct usb_raw_ep_info. */ +#define USB_RAW_EP_NAME_MAX 16 + +/* Used as addr in struct usb_raw_ep_info if endpoint accepts any address. */ +#define USB_RAW_EP_ADDR_ANY 0xff + +/* + * struct usb_raw_ep_caps - exposes endpoint capabilities from struct usb_ep + * (technically from its member struct usb_ep_caps). 
+ */ +struct usb_raw_ep_caps { + __u32 type_control : 1; + __u32 type_iso : 1; + __u32 type_bulk : 1; + __u32 type_int : 1; + __u32 dir_in : 1; + __u32 dir_out : 1; +}; + +/* + * struct usb_raw_ep_limits - exposes endpoint limits from struct usb_ep. + * @maxpacket_limit: Maximum packet size value supported by this endpoint. + * @max_streams: maximum number of streams supported by this endpoint + * (actual number is 2^n). + * @reserved: Empty, reserved for potential future extensions. + */ +struct usb_raw_ep_limits { + __u16 maxpacket_limit; + __u16 max_streams; + __u32 reserved; +}; + +/* + * struct usb_raw_ep_info - stores information about a gadget endpoint. + * @name: Name of the endpoint as it is defined in the UDC driver. + * @addr: Address of the endpoint that must be specified in the endpoint + * descriptor passed to USB_RAW_IOCTL_EP_ENABLE ioctl. + * @caps: Endpoint capabilities. + * @limits: Endpoint limits. + */ +struct usb_raw_ep_info { + __u8 name[USB_RAW_EP_NAME_MAX]; + __u32 addr; + struct usb_raw_ep_caps caps; + struct usb_raw_ep_limits limits; +}; + +/* + * struct usb_raw_eps_info - argument for USB_RAW_IOCTL_EPS_INFO ioctl. + * eps: Structures that store information about non-control endpoints. + */ +struct usb_raw_eps_info { + struct usb_raw_ep_info eps[USB_RAW_EPS_NUM_MAX]; +}; + /* * Initializes a Raw Gadget instance. * Accepts a pointer to the usb_raw_init struct as an argument. @@ -115,37 +173,38 @@ struct usb_raw_ep_io { #define USB_RAW_IOCTL_EVENT_FETCH _IOR('U', 2, struct usb_raw_event) /* - * Queues an IN (OUT for READ) urb as a response to the last control request - * received on endpoint 0, provided that was an IN (OUT for READ) request and - * waits until the urb is completed. Copies received data to user for READ. + * Queues an IN (OUT for READ) request as a response to the last setup request + * received on endpoint 0 (provided that was an IN (OUT for READ) request), and + * waits until the request is completed. Copies received data to user for READ. * Accepts a pointer to the usb_raw_ep_io struct as an argument. - * Returns length of trasferred data on success or negative error code on + * Returns length of transferred data on success or negative error code on * failure. */ #define USB_RAW_IOCTL_EP0_WRITE _IOW('U', 3, struct usb_raw_ep_io) #define USB_RAW_IOCTL_EP0_READ _IOWR('U', 4, struct usb_raw_ep_io) /* - * Finds an endpoint that supports the transfer type specified in the - * descriptor and enables it. - * Accepts a pointer to the usb_endpoint_descriptor struct as an argument. + * Finds an endpoint that satisfies the parameters specified in the provided + * descriptors (address, transfer type, etc.) and enables it. + * Accepts a pointer to the usb_raw_ep_descs struct as an argument. * Returns enabled endpoint handle on success or negative error code on failure. */ #define USB_RAW_IOCTL_EP_ENABLE _IOW('U', 5, struct usb_endpoint_descriptor) -/* Disables specified endpoint. +/* + * Disables specified endpoint. * Accepts endpoint handle as an argument. * Returns 0 on success or negative error code on failure. */ #define USB_RAW_IOCTL_EP_DISABLE _IOW('U', 6, __u32) /* - * Queues an IN (OUT for READ) urb as a response to the last control request - * received on endpoint usb_raw_ep_io.ep, provided that was an IN (OUT for READ) - * request and waits until the urb is completed. Copies received data to user - * for READ. 
+ * Queues an IN (OUT for READ) request as a response to the last setup request + * received on endpoint usb_raw_ep_io.ep (provided that was an IN (OUT for READ) + * request), and waits until the request is completed. Copies received data to + * user for READ. * Accepts a pointer to the usb_raw_ep_io struct as an argument. - * Returns length of trasferred data on success or negative error code on + * Returns length of transferred data on success or negative error code on * failure. */ #define USB_RAW_IOCTL_EP_WRITE _IOW('U', 7, struct usb_raw_ep_io) @@ -164,4 +223,27 @@ struct usb_raw_ep_io { */ #define USB_RAW_IOCTL_VBUS_DRAW _IOW('U', 10, __u32) +/* + * Fills in the usb_raw_eps_info structure with information about non-control + * endpoints available for the currently connected UDC. + * Returns the number of available endpoints on success or negative error code + * on failure. + */ +#define USB_RAW_IOCTL_EPS_INFO _IOR('U', 11, struct usb_raw_eps_info) + +/* + * Stalls a pending control request on endpoint 0. + * Returns 0 on success or negative error code on failure. + */ +#define USB_RAW_IOCTL_EP0_STALL _IO('U', 12) + +/* + * Sets or clears halt or wedge status of the endpoint. + * Accepts endpoint handle as an argument. + * Returns 0 on success or negative error code on failure. + */ +#define USB_RAW_IOCTL_EP_SET_HALT _IOW('U', 13, __u32) +#define USB_RAW_IOCTL_EP_CLEAR_HALT _IOW('U', 14, __u32) +#define USB_RAW_IOCTL_EP_SET_WEDGE _IOW('U', 15, __u32) + #endif /* _UAPI__LINUX_USB_RAW_GADGET_H */ diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 5f3b9fec7b5f4491ad9f38beea7447a305ff4fb0..ff7cfdc6cb44dc98dde15e2e39ebebd80ec8f916 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -304,7 +304,7 @@ enum xfrm_attr_type_t { XFRMA_PROTO, /* __u8 */ XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */ XFRMA_PAD, - XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */ + XFRMA_OFFLOAD_DEV, /* struct xfrm_user_offload */ XFRMA_SET_MARK, /* __u32 */ XFRMA_SET_MARK_MASK, /* __u32 */ XFRMA_IF_ID, /* __u32 */ diff --git a/init/main.c b/init/main.c index e2294e865a6b9abefb593af6c831648f8a0426aa..8cf7d4c1afe4e65e7a1dfd18399afdcd226082d6 100644 --- a/init/main.c +++ b/init/main.c @@ -733,6 +733,8 @@ asmlinkage __visible void __init start_kernel(void) /* Do the rest non-__init'ed, we're now alive */ rest_init(); + + prevent_tail_call_optimization(); } /* Call all constructor functions linked into the kernel. 
*/ diff --git a/ipc/util.c b/ipc/util.c index 7989f5e532198b19d65d895b5d0326d4bfe0bd36..5a65b0cbae7db6d5686d33bfcfd6b24e96daa6e6 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -750,21 +750,21 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos, total++; } - *new_pos = pos + 1; + ipc = NULL; if (total >= ids->in_use) - return NULL; + goto out; for (; pos < IPCMNI; pos++) { ipc = idr_find(&ids->ipcs_idr, pos); if (ipc != NULL) { rcu_read_lock(); ipc_lock_object(ipc); - return ipc; + break; } } - - /* Out of range - return NULL to terminate iteration */ - return NULL; +out: + *new_pos = pos + 1; + return ipc; } static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos) diff --git a/kernel/audit.c b/kernel/audit.c index aa6d5e39526b1840bf71869db6b9bcf5708f0db9..6faaa908544af67e5f820b734cf297d0f4bbfada 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -853,7 +853,7 @@ static int kauditd_thread(void *dummy) return 0; } -int audit_send_list(void *_dest) +int audit_send_list_thread(void *_dest) { struct audit_netlink_list *dest = _dest; struct sk_buff *skb; @@ -897,19 +897,30 @@ struct sk_buff *audit_make_reply(int seq, int type, int done, return NULL; } +static void audit_free_reply(struct audit_reply *reply) +{ + if (!reply) + return; + + if (reply->skb) + kfree_skb(reply->skb); + if (reply->net) + put_net(reply->net); + kfree(reply); +} + static int audit_send_reply_thread(void *arg) { struct audit_reply *reply = (struct audit_reply *)arg; - struct sock *sk = audit_get_sk(reply->net); mutex_lock(&audit_cmd_mutex); mutex_unlock(&audit_cmd_mutex); /* Ignore failure. It'll only happen if the sender goes away, because our timeout is set to infinite. */ - netlink_unicast(sk, reply->skb, reply->portid, 0); - put_net(reply->net); - kfree(reply); + netlink_unicast(audit_get_sk(reply->net), reply->skb, reply->portid, 0); + reply->skb = NULL; + audit_free_reply(reply); return 0; } @@ -923,35 +934,32 @@ static int audit_send_reply_thread(void *arg) * @payload: payload data * @size: payload size * - * Allocates an skb, builds the netlink message, and sends it to the port id. - * No failure notifications. + * Allocates a skb, builds the netlink message, and sends it to the port id. 
*/ static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done, int multi, const void *payload, int size) { - struct net *net = sock_net(NETLINK_CB(request_skb).sk); - struct sk_buff *skb; struct task_struct *tsk; - struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), - GFP_KERNEL); + struct audit_reply *reply; + reply = kzalloc(sizeof(*reply), GFP_KERNEL); if (!reply) return; - skb = audit_make_reply(seq, type, done, multi, payload, size); - if (!skb) - goto out; - - reply->net = get_net(net); + reply->skb = audit_make_reply(seq, type, done, multi, payload, size); + if (!reply->skb) + goto err; + reply->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); reply->portid = NETLINK_CB(request_skb).portid; - reply->skb = skb; tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); - if (!IS_ERR(tsk)) - return; - kfree_skb(skb); -out: - kfree(reply); + if (IS_ERR(tsk)) + goto err; + + return; + +err: + audit_free_reply(reply); } /* diff --git a/kernel/audit.h b/kernel/audit.h index 9b110ae17ee3fe744592fdb1127d1dc868577af2..1007773b0b8162d4d9ce7306e66afe5b33507205 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -248,7 +248,7 @@ struct audit_netlink_list { struct sk_buff_head q; }; -int audit_send_list(void *_dest); +int audit_send_list_thread(void *_dest); extern int selinux_audit_rule_update(void); diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 16cf396ea738a2c29769b18997f30fdb0d59a9a9..f26f4cb5d08d9ade5a58b4afc1fad306b095a071 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -1137,11 +1137,8 @@ int audit_rule_change(int type, int seq, void *data, size_t datasz) */ int audit_list_rules_send(struct sk_buff *request_skb, int seq) { - u32 portid = NETLINK_CB(request_skb).portid; - struct net *net = sock_net(NETLINK_CB(request_skb).sk); struct task_struct *tsk; struct audit_netlink_list *dest; - int err = 0; /* We can't just spew out the rules here because we might fill * the available socket buffer space and deadlock waiting for @@ -1149,25 +1146,26 @@ int audit_list_rules_send(struct sk_buff *request_skb, int seq) * happen if we're actually running in the context of auditctl * trying to _send_ the stuff */ - dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); + dest = kmalloc(sizeof(*dest), GFP_KERNEL); if (!dest) return -ENOMEM; - dest->net = get_net(net); - dest->portid = portid; + dest->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); + dest->portid = NETLINK_CB(request_skb).portid; skb_queue_head_init(&dest->q); mutex_lock(&audit_filter_mutex); audit_list_rules(seq, &dest->q); mutex_unlock(&audit_filter_mutex); - tsk = kthread_run(audit_send_list, dest, "audit_send_list"); + tsk = kthread_run(audit_send_list_thread, dest, "audit_send_list"); if (IS_ERR(tsk)) { skb_queue_purge(&dest->q); + put_net(dest->net); kfree(dest); - err = PTR_ERR(tsk); + return PTR_ERR(tsk); } - return err; + return 0; } int audit_comparator(u32 left, u32 op, u32 right) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index a86b94e29d6bddd5afed002f8a3f9f7899bc6663..8c3850ab1da5c8c5f6a12ea0118edee282dcf326 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -5924,17 +5924,8 @@ void cgroup_sk_alloc_disable(void) void cgroup_sk_alloc(struct sock_cgroup_data *skcd) { - if (cgroup_sk_alloc_disabled) - return; - - /* Socket clone path */ - if (skcd->val) { - /* - * We might be cloning a socket which is left in an empty - * cgroup and the cgroup might have already been rmdir'd. 
- * Don't use cgroup_get_live(). - */ - cgroup_get(sock_cgroup_ptr(skcd)); + if (cgroup_sk_alloc_disabled) { + skcd->no_refcnt = 1; return; } @@ -5958,8 +5949,24 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd) rcu_read_unlock(); } +void cgroup_sk_clone(struct sock_cgroup_data *skcd) +{ + /* Socket clone path */ + if (skcd->val) { + /* + * We might be cloning a socket which is left in an empty + * cgroup and the cgroup might have already been rmdir'd. + * Don't use cgroup_get_live(). + */ + cgroup_get(sock_cgroup_ptr(skcd)); + } +} + void cgroup_sk_free(struct sock_cgroup_data *skcd) { + if (skcd->no_refcnt) + return; + cgroup_put(sock_cgroup_ptr(skcd)); } diff --git a/kernel/compat.c b/kernel/compat.c index 7e83733d4c95c854c1a4acf2b8b7a96bf91c0d59..45ae3ace49c294ae086869627cdf2bed7186bdd9 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -437,10 +437,9 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG); nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size); - if (!access_ok(VERIFY_READ, umask, bitmap_size / 8)) + if (!user_access_begin(VERIFY_READ, umask, bitmap_size / 8)) return -EFAULT; - user_access_begin(); while (nr_compat_longs > 1) { compat_ulong_t l1, l2; unsafe_get_user(l1, umask++, Efault); @@ -467,10 +466,9 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG); nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size); - if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8)) + if (!user_access_begin(VERIFY_WRITE, umask, bitmap_size / 8)) return -EFAULT; - user_access_begin(); while (nr_compat_longs > 1) { unsigned long m = *mask++; unsafe_put_user((compat_ulong_t)m, umask++, Efault); diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c index f28e05cadb9a57fd91a28ae618969256cb40cceb..e241c91771352c47aa6df3097b4918e178a900f1 100644 --- a/kernel/cpu_pm.c +++ b/kernel/cpu_pm.c @@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier); */ int cpu_pm_enter(void) { - int nr_calls; + int nr_calls = 0; int ret = 0; ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls, NULL); @@ -143,7 +143,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit); */ int cpu_cluster_pm_enter(unsigned long aff_level) { - int nr_calls; + int nr_calls = 0; int ret = 0; ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls, diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 94aa9ae0007ace3a8b85db2ad6070883b90db7fc..694fcd0492827df964d15877acad1498b23680ea 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -444,6 +444,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks) if (exception_level > 1) { dump_stack(); + kgdb_io_module_registered = false; panic("Recursive entry to debugger"); } @@ -488,6 +489,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, arch_kgdb_ops.disable_hw_break(regs); acquirelock: + rcu_read_lock(); /* * Interrupts will be restored by the 'trap return' code, except when * single stepping. 
@@ -544,6 +546,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, atomic_dec(&slaves_in_kgdb); dbg_touch_watchdogs(); local_irq_restore(flags); + rcu_read_unlock(); return 0; } cpu_relax(); @@ -562,6 +565,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, raw_spin_unlock(&dbg_master_lock); dbg_touch_watchdogs(); local_irq_restore(flags); + rcu_read_unlock(); goto acquirelock; } @@ -681,6 +685,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, raw_spin_unlock(&dbg_master_lock); dbg_touch_watchdogs(); local_irq_restore(flags); + rcu_read_unlock(); return kgdb_info[cpu].ret_state; } diff --git a/kernel/events/core.c b/kernel/events/core.c index 0033bc900c4bcf0e3e41ab1b2fcf770b4c0579e2..74eba84d363b6cbd68e8e6fd3f97af8f141e188e 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -94,11 +94,11 @@ static void remote_function(void *data) * @info: the function call argument * * Calls the function @func when the task is currently running. This might - * be on the current CPU, which just calls the function directly + * be on the current CPU, which just calls the function directly. This will + * retry due to any failures in smp_call_function_single(), such as if the + * task_cpu() goes offline concurrently. * - * returns: @func return value, or - * -ESRCH - when the process isn't running - * -EAGAIN - when the process moved away + * returns @func return value or -ESRCH when the process isn't running */ static int task_function_call(struct task_struct *p, remote_function_f func, void *info) @@ -111,11 +111,16 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info) }; int ret; - do { - ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1); - if (!ret) - ret = data.ret; - } while (ret == -EAGAIN); + for (;;) { + ret = smp_call_function_single(task_cpu(p), remote_function, + &data, 1); + ret = !ret ? data.ret : -EAGAIN; + + if (ret != -EAGAIN) + break; + + cond_resched(); + } return ret; } @@ -7331,6 +7336,10 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter, struct file *file, unsigned long offset, unsigned long size) { + /* d_inode(NULL) won't be equal to any mapped user-space file */ + if (!filter->path.dentry) + return false; + if (d_inode(filter->path.dentry) != file_inode(file)) return false; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index c74fc982625082d1669e44e7276bd039ddca9fee..f08ba79ee5d1f6471f127c549db80e50c8738f2e 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -612,10 +612,6 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file, if (ret) goto out; - /* uprobe_write_opcode() assumes we don't cross page boundary */ - BUG_ON((uprobe->offset & ~PAGE_MASK) + - UPROBE_SWBP_INSN_SIZE > PAGE_SIZE); - smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */ set_bit(UPROBE_COPY_INSN, &uprobe->flags); @@ -894,6 +890,13 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * if (offset > i_size_read(inode)) return -EINVAL; + /* + * This ensures that copy_from_page() and copy_to_page() + * can't cross page boundary. 
+ */ + if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE)) + return -EINVAL; + retry: uprobe = alloc_uprobe(inode, offset); if (!uprobe) @@ -1704,6 +1707,9 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) uprobe_opcode_t opcode; int result; + if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE))) + return -EINVAL; + pagefault_disable(); result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr); pagefault_enable(); @@ -1887,7 +1893,7 @@ static void handle_swbp(struct pt_regs *regs) if (!uprobe) { if (is_swbp > 0) { /* No matching uprobe; signal SIGTRAP. */ - send_sig(SIGTRAP, current, 0); + force_sig(SIGTRAP, current); } else { /* * Either we raced with uprobe_unregister() or we can't diff --git a/kernel/exit.c b/kernel/exit.c index 5a25e760f421ba19650b15eeb61ceee8bc7d55ba..d9013af253bf98bd0b8bce8eeb5b7fafb3a384b5 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -780,8 +780,12 @@ void __noreturn do_exit(long code) struct task_struct *tsk = current; int group_dead; - profile_task_exit(tsk); - kcov_task_exit(tsk); + /* + * We can get here from a kernel oops, sometimes with preemption off. + * Start by checking for critical errors. + * Then fix up important state like USER_DS and preemption. + * Then do everything else. + */ WARN_ON(blk_needs_flush_plug(tsk)); @@ -799,6 +803,16 @@ void __noreturn do_exit(long code) */ set_fs(USER_DS); + if (unlikely(in_atomic())) { + pr_info("note: %s[%d] exited with preempt_count %d\n", + current->comm, task_pid_nr(current), + preempt_count()); + preempt_count_set(PREEMPT_ENABLED); + } + + profile_task_exit(tsk); + kcov_task_exit(tsk); + ptrace_event(PTRACE_EVENT_EXIT, code); validate_creds_for_do_exit(tsk); @@ -821,13 +835,6 @@ void __noreturn do_exit(long code) exit_signals(tsk); /* sets PF_EXITING */ sched_exit(tsk); - if (unlikely(in_atomic())) { - pr_info("note: %s[%d] exited with preempt_count %d\n", - current->comm, task_pid_nr(current), - preempt_count()); - preempt_count_set(PREEMPT_ENABLED); - } - /* sync mm's RSS info before statistics gathering */ if (tsk->mm) sync_mm_rss(tsk->mm); @@ -1612,10 +1619,9 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, if (!infop) return err; - if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop))) + if (!user_access_begin(VERIFY_WRITE, infop, sizeof(*infop))) return -EFAULT; - user_access_begin(); unsafe_put_user(signo, &infop->si_signo, Efault); unsafe_put_user(0, &infop->si_errno, Efault); unsafe_put_user(info.cause, &infop->si_code, Efault); @@ -1740,10 +1746,9 @@ COMPAT_SYSCALL_DEFINE5(waitid, if (!infop) return err; - if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop))) + if (!user_access_begin(VERIFY_WRITE, infop, sizeof(*infop))) return -EFAULT; - user_access_begin(); unsafe_put_user(signo, &infop->si_signo, Efault); unsafe_put_user(0, &infop->si_errno, Efault); unsafe_put_user(info.cause, &infop->si_code, Efault); diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 86ae0eb80b533f42f4d13f31b3b9f902b5cd8e88..8b8cecd18cce1df2da0d3b7b1b5241d9432aa4b4 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -38,17 +38,18 @@ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear) void irq_move_masked_irq(struct irq_data *idata) { struct irq_desc *desc = irq_data_to_desc(idata); - struct irq_chip *chip = desc->irq_data.chip; + struct irq_data *data = &desc->irq_data; + struct irq_chip *chip = data->chip; - if (likely(!irqd_is_setaffinity_pending(&desc->irq_data))) + if (likely(!irqd_is_setaffinity_pending(data))) return; 
- irqd_clr_move_pending(&desc->irq_data); + irqd_clr_move_pending(data); /* * Paranoia: cpu-local interrupts shouldn't be calling in here anyway. */ - if (irqd_is_per_cpu(&desc->irq_data)) { + if (irqd_is_per_cpu(data)) { WARN_ON(1); return; } @@ -73,9 +74,20 @@ void irq_move_masked_irq(struct irq_data *idata) * For correct operation this depends on the caller * masking the irqs. */ - if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) - irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false); - + if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) { + int ret; + + ret = irq_do_set_affinity(data, desc->pending_mask, false); + /* + * If the there is a cleanup pending in the underlying + * vector management, reschedule the move for the next + * interrupt. Leave desc->pending_mask intact. + */ + if (ret == -EBUSY) { + irqd_set_move_pending(data); + return; + } + } cpumask_clear(desc->pending_mask); } diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 66f1818d47620be11c91a742f975399be18c5db7..f2d2194b51ca33e14fb728801ad8b5a0e30755ee 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -599,11 +599,12 @@ static void kprobe_optimizer(struct work_struct *work) mutex_unlock(&module_mutex); mutex_unlock(&text_mutex); cpus_read_unlock(); - mutex_unlock(&kprobe_mutex); /* Step 5: Kick optimizer again if needed */ if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) kick_kprobe_optimizer(); + + mutex_unlock(&kprobe_mutex); } /* Wait for completing optimization and unoptimization */ @@ -1217,6 +1218,26 @@ __releases(hlist_lock) } NOKPROBE_SYMBOL(kretprobe_table_unlock); +struct kprobe kprobe_busy = { + .addr = (void *) get_kprobe, +}; + +void kprobe_busy_begin(void) +{ + struct kprobe_ctlblk *kcb; + + preempt_disable(); + __this_cpu_write(current_kprobe, &kprobe_busy); + kcb = get_kprobe_ctlblk(); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; +} + +void kprobe_busy_end(void) +{ + __this_cpu_write(current_kprobe, NULL); + preempt_enable(); +} + /* * This function is called from finish_task_switch when task tk becomes dead, * so that we can recycle any function-return probe instances associated @@ -1234,6 +1255,8 @@ void kprobe_flush_task(struct task_struct *tk) /* Early boot. kretprobe_table_locks not yet initialized. 
*/ return; + kprobe_busy_begin(); + INIT_HLIST_HEAD(&empty_rp); hash = hash_ptr(tk, KPROBE_HASH_BITS); head = &kretprobe_inst_table[hash]; @@ -1247,6 +1270,8 @@ void kprobe_flush_task(struct task_struct *tk) hlist_del(&ri->hlist); kfree(ri); } + + kprobe_busy_end(); } NOKPROBE_SYMBOL(kprobe_flush_task); diff --git a/kernel/module.c b/kernel/module.c index 31c5623457e8e0c2596a7b8ee1cb4cde026dd09a..d32e1a2a2c742bdb9ca191bb95aa4fa9bb8e343b 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -4161,8 +4161,10 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, static void cfi_init(struct module *mod) { #ifdef CONFIG_CFI_CLANG + preempt_disable(); mod->cfi_check = (cfi_check_fn)mod_find_symname(mod, CFI_CHECK_FN_NAME); + preempt_enable(); cfi_module_add(mod, module_addr_min, module_addr_max); #endif } diff --git a/kernel/padata.c b/kernel/padata.c index a71620d2b8bab46d81d4f590a6212a7f010d1eba..f56ec63f60ba85c49eaa22dcf6443229fcc39219 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -133,6 +133,7 @@ int padata_do_parallel(struct padata_instance *pinst, padata->cb_cpu = cb_cpu; target_cpu = padata_cpu_hash(pd); + padata->cpu = target_cpu; queue = per_cpu_ptr(pd->pqueue, target_cpu); spin_lock(&queue->parallel.lock); @@ -165,23 +166,12 @@ EXPORT_SYMBOL(padata_do_parallel); */ static struct padata_priv *padata_get_next(struct parallel_data *pd) { - int cpu, num_cpus; - unsigned int next_nr, next_index; struct padata_parallel_queue *next_queue; struct padata_priv *padata; struct padata_list *reorder; + int cpu = pd->cpu; - num_cpus = cpumask_weight(pd->cpumask.pcpu); - - /* - * Calculate the percpu reorder queue and the sequence - * number of the next object. - */ - next_nr = pd->processed; - next_index = next_nr % num_cpus; - cpu = padata_index_to_cpu(pd, next_index); next_queue = per_cpu_ptr(pd->pqueue, cpu); - reorder = &next_queue->reorder; spin_lock(&reorder->lock); @@ -192,7 +182,8 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) list_del_init(&padata->list); atomic_dec(&pd->reorder_objects); - pd->processed++; + pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, + false); spin_unlock(&reorder->lock); goto out; @@ -215,6 +206,7 @@ static void padata_reorder(struct parallel_data *pd) struct padata_priv *padata; struct padata_serial_queue *squeue; struct padata_instance *pinst = pd->pinst; + struct padata_parallel_queue *next_queue; /* * We need to ensure that only one cpu can work on dequeueing of @@ -246,7 +238,6 @@ static void padata_reorder(struct parallel_data *pd) * so exit immediately. */ if (PTR_ERR(padata) == -ENODATA) { - del_timer(&pd->timer); spin_unlock_bh(&pd->lock); return; } @@ -265,28 +256,27 @@ static void padata_reorder(struct parallel_data *pd) /* * The next object that needs serialization might have arrived to - * the reorder queues in the meantime, we will be called again - * from the timer function if no one else cares for it. + * the reorder queues in the meantime. * - * Ensure reorder_objects is read after pd->lock is dropped so we see - * an increment from another task in padata_do_serial. Pairs with + * Ensure reorder queue is read after pd->lock is dropped so we see + * new objects from another task in padata_do_serial. Pairs with * smp_mb__after_atomic in padata_do_serial. 
*/ smp_mb(); - if (atomic_read(&pd->reorder_objects) - && !(pinst->flags & PADATA_RESET)) - mod_timer(&pd->timer, jiffies + HZ); - else - del_timer(&pd->timer); - return; + next_queue = per_cpu_ptr(pd->pqueue, pd->cpu); + if (!list_empty(&next_queue->reorder.list)) + queue_work(pinst->wq, &pd->reorder_work); } -static void padata_reorder_timer(unsigned long arg) +static void invoke_padata_reorder(struct work_struct *work) { - struct parallel_data *pd = (struct parallel_data *)arg; + struct parallel_data *pd; + local_bh_disable(); + pd = container_of(work, struct parallel_data, reorder_work); padata_reorder(pd); + local_bh_enable(); } static void padata_serial_worker(struct work_struct *serial_work) @@ -333,29 +323,22 @@ static void padata_serial_worker(struct work_struct *serial_work) */ void padata_do_serial(struct padata_priv *padata) { - int cpu; - struct padata_parallel_queue *pqueue; - struct parallel_data *pd; - - pd = padata->pd; - - cpu = get_cpu(); - pqueue = per_cpu_ptr(pd->pqueue, cpu); + struct parallel_data *pd = padata->pd; + struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue, + padata->cpu); spin_lock(&pqueue->reorder.lock); - atomic_inc(&pd->reorder_objects); list_add_tail(&padata->list, &pqueue->reorder.list); + atomic_inc(&pd->reorder_objects); spin_unlock(&pqueue->reorder.lock); /* - * Ensure the atomic_inc of reorder_objects above is ordered correctly + * Ensure the addition to the reorder list is ordered correctly * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb * in padata_reorder. */ smp_mb__after_atomic(); - put_cpu(); - padata_reorder(pd); } EXPORT_SYMBOL(padata_do_serial); @@ -404,9 +387,14 @@ static void padata_init_pqueues(struct parallel_data *pd) struct padata_parallel_queue *pqueue; cpu_index = 0; - for_each_cpu(cpu, pd->cpumask.pcpu) { + for_each_possible_cpu(cpu) { pqueue = per_cpu_ptr(pd->pqueue, cpu); - pqueue->pd = pd; + + if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) { + pqueue->cpu_index = -1; + continue; + } + pqueue->cpu_index = cpu_index; cpu_index++; @@ -440,12 +428,13 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, padata_init_pqueues(pd); padata_init_squeues(pd); - setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); atomic_set(&pd->seq_nr, -1); atomic_set(&pd->reorder_objects, 0); atomic_set(&pd->refcnt, 1); pd->pinst = pinst; spin_lock_init(&pd->lock); + pd->cpu = cpumask_first(pd->cpumask.pcpu); + INIT_WORK(&pd->reorder_work, invoke_padata_reorder); return pd; diff --git a/kernel/relay.c b/kernel/relay.c index 61d37e6da22dd1c30a7f15f28ec3bdfc5c17f9b0..b141ce697679f29973d28559489b9a7350a69bc8 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -580,6 +580,11 @@ struct rchan *relay_open(const char *base_filename, return NULL; chan->buf = alloc_percpu(struct rchan_buf *); + if (!chan->buf) { + kfree(chan); + return NULL; + } + chan->version = RELAYFS_CHANNEL_VERSION; chan->n_subbufs = n_subbufs; chan->subbuf_size = subbuf_size; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d125602d43cb3a9617c3382458d12fe2de7fbef9..b1e87f06023df4242cb8d0a945b2eaa4e165fb26 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3930,7 +3930,8 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) */ if (dl_prio(prio)) { if (!dl_prio(p->normal_prio) || - (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { + (pi_task && dl_prio(pi_task->prio) && + dl_entity_preempt(&pi_task->dl, &p->dl))) { p->dl.dl_boosted = 1; queue_flag |= ENQUEUE_REPLENISH; } 
else diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3b52c9179d3375ad3213af6f02bbdaa3bee3a10d..63b887b27e84467148e5a077609566c9b75784f2 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2714,7 +2714,7 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr) /* * We don't care about NUMA placement if we don't have memory. */ - if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) + if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) return; /* @@ -9319,7 +9319,15 @@ static int detach_tasks(struct lb_env *env) if (!can_migrate_task(p, env)) goto next; - load = task_h_load(p); + /* + * Depending of the number of CPUs and tasks and the + * cgroup hierarchy, task_h_load() can return a null + * value. Make sure that env->imbalance decreases + * otherwise detach_tasks() will stop only after + * detaching up to loop_max tasks. + */ + load = max_t(unsigned long, task_h_load(p), 1); + if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) goto next; diff --git a/kernel/scs.c b/kernel/scs.c index ad74d13f2c0f8453249088e5f40a8aef94d72391..c8e53358e20a5e3d31db1c5b19baf470783db06c 100644 --- a/kernel/scs.c +++ b/kernel/scs.c @@ -185,36 +185,31 @@ int scs_prepare(struct task_struct *tsk, int node) } #ifdef CONFIG_DEBUG_STACK_USAGE -static inline unsigned long scs_used(struct task_struct *tsk) +static void scs_check_usage(struct task_struct *tsk) { + static unsigned long highest; + unsigned long *p = __scs_base(tsk); unsigned long *end = scs_magic(p); - unsigned long s = (unsigned long)p; - - while (p < end && READ_ONCE_NOCHECK(*p)) - p++; + unsigned long prev, curr = highest, used = 0; - return (unsigned long)p - s; -} - -static void scs_check_usage(struct task_struct *tsk) -{ - static DEFINE_SPINLOCK(lock); - static unsigned long highest; - unsigned long used = scs_used(tsk); + for (; p < end; ++p) { + if (!READ_ONCE_NOCHECK(*p)) + break; + used += sizeof(*p); + } - if (used <= highest) - return; + while (used > curr) { + prev = cmpxchg_relaxed(&highest, curr, used); - spin_lock(&lock); + if (prev == curr) { + pr_info("%s (%d): highest shadow stack usage: %lu bytes\n", + tsk->comm, task_pid_nr(tsk), used); + break; + } - if (used > highest) { - pr_info("%s (%d): highest shadow stack usage: %lu bytes\n", - tsk->comm, task_pid_nr(tsk), used); - highest = used; + curr = prev; } - - spin_unlock(&lock); } #else static inline void scs_check_usage(struct task_struct *tsk) diff --git a/kernel/time/timer.c b/kernel/time/timer.c index c4f2116ef4702e239cc5717192beb9d34f4d4003..a04483e9dfdf96128a78e247d55ed9905988e784 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -516,8 +516,8 @@ static int calc_wheel_index(unsigned long expires, unsigned long clk) * Force expire obscene large timeouts to expire at the * capacity limit of the wheel. 
*/ - if (expires >= WHEEL_TIMEOUT_CUTOFF) - expires = WHEEL_TIMEOUT_MAX; + if (delta >= WHEEL_TIMEOUT_CUTOFF) + expires = clk + WHEEL_TIMEOUT_MAX; idx = calc_index(expires, LVL_DEPTH - 1); } diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 71a8ee6e60dca654973713c4a263fa1685d9cd4e..9a55c5bc52434408a6a7ba1d9022dcd0cc0b9fea 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -15,6 +15,9 @@ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -348,11 +351,12 @@ static void put_probe_ref(void) static void blk_trace_cleanup(struct blk_trace *bt) { + synchronize_rcu(); blk_trace_free(bt); put_probe_ref(); } -int blk_trace_remove(struct request_queue *q) +static int __blk_trace_remove(struct request_queue *q) { struct blk_trace *bt; @@ -365,6 +369,17 @@ int blk_trace_remove(struct request_queue *q) return 0; } + +int blk_trace_remove(struct request_queue *q) +{ + int ret; + + mutex_lock(&q->blk_trace_mutex); + ret = __blk_trace_remove(q); + mutex_unlock(&q->blk_trace_mutex); + + return ret; +} EXPORT_SYMBOL_GPL(blk_trace_remove); static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, @@ -492,6 +507,16 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, */ strreplace(buts->name, '/', '_'); + /* + * bdev can be NULL, as with scsi-generic, this is a helpful as + * we can be. + */ + if (q->blk_trace) { + pr_warn("Concurrent blktraces are not allowed on %s\n", + buts->name); + return -EBUSY; + } + bt = kzalloc(sizeof(*bt), GFP_KERNEL); if (!bt) return -ENOMEM; @@ -565,9 +590,8 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, return ret; } -int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, - struct block_device *bdev, - char __user *arg) +static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev, + struct block_device *bdev, char __user *arg) { struct blk_user_trace_setup buts; int ret; @@ -581,11 +605,24 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, return ret; if (copy_to_user(arg, &buts, sizeof(buts))) { - blk_trace_remove(q); + __blk_trace_remove(q); return -EFAULT; } return 0; } + +int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, + struct block_device *bdev, + char __user *arg) +{ + int ret; + + mutex_lock(&q->blk_trace_mutex); + ret = __blk_trace_setup(q, name, dev, bdev, arg); + mutex_unlock(&q->blk_trace_mutex); + + return ret; +} EXPORT_SYMBOL_GPL(blk_trace_setup); #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) @@ -614,7 +651,7 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name, return ret; if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) { - blk_trace_remove(q); + __blk_trace_remove(q); return -EFAULT; } @@ -622,11 +659,13 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name, } #endif -int blk_trace_startstop(struct request_queue *q, int start) +static int __blk_trace_startstop(struct request_queue *q, int start) { int ret; - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); if (bt == NULL) return -EINVAL; @@ -661,6 +700,17 @@ int blk_trace_startstop(struct request_queue *q, int start) return ret; } + +int blk_trace_startstop(struct request_queue *q, int start) +{ + int ret; + + mutex_lock(&q->blk_trace_mutex); + ret = __blk_trace_startstop(q, 
start); + mutex_unlock(&q->blk_trace_mutex); + + return ret; +} EXPORT_SYMBOL_GPL(blk_trace_startstop); /* @@ -691,7 +741,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) switch (cmd) { case BLKTRACESETUP: bdevname(bdev, b); - ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); + ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); break; #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) case BLKTRACESETUP32: @@ -702,10 +752,10 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) case BLKTRACESTART: start = 1; case BLKTRACESTOP: - ret = blk_trace_startstop(q, start); + ret = __blk_trace_startstop(q, start); break; case BLKTRACETEARDOWN: - ret = blk_trace_remove(q); + ret = __blk_trace_remove(q); break; default: ret = -ENOTTY; @@ -723,18 +773,24 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) **/ void blk_trace_shutdown(struct request_queue *q) { - if (q->blk_trace) { - blk_trace_startstop(q, 0); - blk_trace_remove(q); + mutex_lock(&q->blk_trace_mutex); + if (rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex))) { + __blk_trace_startstop(q, 0); + __blk_trace_remove(q); } + + mutex_unlock(&q->blk_trace_mutex); } #ifdef CONFIG_BLK_CGROUP static union kernfs_node_id * blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + /* We don't use the 'bt' value here except as an optimization... */ + bt = rcu_dereference_protected(q->blk_trace, 1); if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) return NULL; @@ -779,10 +835,14 @@ static void blk_add_trace_rq(struct request *rq, int error, unsigned int nr_bytes, u32 what, union kernfs_node_id *cgid) { - struct blk_trace *bt = rq->q->blk_trace; + struct blk_trace *bt; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(rq->q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } if (blk_rq_is_passthrough(rq)) what |= BLK_TC_ACT(BLK_TC_PC); @@ -791,6 +851,7 @@ static void blk_add_trace_rq(struct request *rq, int error, __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq), rq->cmd_flags, what, error, 0, NULL, cgid); + rcu_read_unlock(); } static void blk_add_trace_rq_insert(void *ignore, @@ -836,13 +897,18 @@ static void blk_add_trace_rq_complete(void *ignore, struct request *rq, static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, u32 what, int error, union kernfs_node_id *cgid) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid); + rcu_read_unlock(); } static void blk_add_trace_bio_bounce(void *ignore, @@ -893,11 +959,14 @@ static void blk_add_trace_getrq(void *ignore, blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0, blk_trace_bio_get_cgid(q, bio)); else { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0, NULL, NULL); + rcu_read_unlock(); } } @@ -910,27 +979,35 @@ static void blk_add_trace_sleeprq(void *ignore, blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0, blk_trace_bio_get_cgid(q, bio)); else { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = 
rcu_dereference(q->blk_trace); if (bt) __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ, 0, 0, NULL, NULL); + rcu_read_unlock(); } } static void blk_add_trace_plug(void *ignore, struct request_queue *q) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL); + rcu_read_unlock(); } static void blk_add_trace_unplug(void *ignore, struct request_queue *q, unsigned int depth, bool explicit) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) { __be64 rpdu = cpu_to_be64(depth); u32 what; @@ -942,22 +1019,28 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q, __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL); } + rcu_read_unlock(); } static void blk_add_trace_split(void *ignore, struct request_queue *q, struct bio *bio, unsigned int pdu) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) { __be64 rpdu = cpu_to_be64(pdu); __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, - BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu), - &rpdu, blk_trace_bio_get_cgid(q, bio)); + BLK_TA_SPLIT, + blk_status_to_errno(bio->bi_status), + sizeof(rpdu), &rpdu, + blk_trace_bio_get_cgid(q, bio)); } + rcu_read_unlock(); } /** @@ -977,19 +1060,25 @@ static void blk_add_trace_bio_remap(void *ignore, struct request_queue *q, struct bio *bio, dev_t dev, sector_t from) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; struct blk_io_trace_remap r; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } r.device_from = cpu_to_be32(dev); r.device_to = cpu_to_be32(bio_dev(bio)); r.sector_from = cpu_to_be64(from); __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, - bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status, + bio_op(bio), bio->bi_opf, BLK_TA_REMAP, + blk_status_to_errno(bio->bi_status), sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); + rcu_read_unlock(); } /** @@ -1010,11 +1099,15 @@ static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev, sector_t from) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; struct blk_io_trace_remap r; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } r.device_from = cpu_to_be32(dev); r.device_to = cpu_to_be32(disk_devt(rq->rq_disk)); @@ -1023,6 +1116,7 @@ static void blk_add_trace_rq_remap(void *ignore, __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rq_data_dir(rq), 0, BLK_TA_REMAP, 0, sizeof(r), &r, blk_trace_request_get_cgid(q, rq)); + rcu_read_unlock(); } /** @@ -1040,14 +1134,19 @@ void blk_add_driver_data(struct request_queue *q, struct request *rq, void *data, size_t len) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0, BLK_TA_DRV_DATA, 0, len, data, blk_trace_request_get_cgid(q, rq)); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(blk_add_driver_data); @@ -1199,21 +1298,10 @@ static inline __u16 t_error(const struct trace_entry *ent) static __u64 get_pdu_int(const struct trace_entry 
*ent, bool has_cg) { - const __u64 *val = pdu_start(ent, has_cg); + const __be64 *val = pdu_start(ent, has_cg); return be64_to_cpu(*val); } -static void get_pdu_remap(const struct trace_entry *ent, - struct blk_io_trace_remap *r, bool has_cg) -{ - const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); - __u64 sector_from = __r->sector_from; - - r->device_from = be32_to_cpu(__r->device_from); - r->device_to = be32_to_cpu(__r->device_to); - r->sector_from = be64_to_cpu(sector_from); -} - typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act, bool has_cg); @@ -1339,13 +1427,13 @@ static void blk_log_with_error(struct trace_seq *s, static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) { - struct blk_io_trace_remap r = { .device_from = 0, }; + const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); - get_pdu_remap(ent, &r, has_cg); trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", t_sector(ent), t_sec(ent), - MAJOR(r.device_from), MINOR(r.device_from), - (unsigned long long)r.sector_from); + MAJOR(be32_to_cpu(__r->device_from)), + MINOR(be32_to_cpu(__r->device_from)), + be64_to_cpu(__r->sector_from)); } static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) @@ -1574,6 +1662,7 @@ static int blk_trace_remove_queue(struct request_queue *q) return -EINVAL; put_probe_ref(); + synchronize_rcu(); blk_trace_free(bt); return 0; } @@ -1735,6 +1824,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, struct hd_struct *p = dev_to_part(dev); struct request_queue *q; struct block_device *bdev; + struct blk_trace *bt; ssize_t ret = -ENXIO; bdev = bdget(part_devt(p)); @@ -1747,21 +1837,23 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, mutex_lock(&q->blk_trace_mutex); + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); if (attr == &dev_attr_enable) { - ret = sprintf(buf, "%u\n", !!q->blk_trace); + ret = sprintf(buf, "%u\n", !!bt); goto out_unlock_bdev; } - if (q->blk_trace == NULL) + if (bt == NULL) ret = sprintf(buf, "disabled\n"); else if (attr == &dev_attr_act_mask) - ret = blk_trace_mask2str(buf, q->blk_trace->act_mask); + ret = blk_trace_mask2str(buf, bt->act_mask); else if (attr == &dev_attr_pid) - ret = sprintf(buf, "%u\n", q->blk_trace->pid); + ret = sprintf(buf, "%u\n", bt->pid); else if (attr == &dev_attr_start_lba) - ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba); + ret = sprintf(buf, "%llu\n", bt->start_lba); else if (attr == &dev_attr_end_lba) - ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba); + ret = sprintf(buf, "%llu\n", bt->end_lba); out_unlock_bdev: mutex_unlock(&q->blk_trace_mutex); @@ -1778,6 +1870,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, struct block_device *bdev; struct request_queue *q; struct hd_struct *p; + struct blk_trace *bt; u64 value; ssize_t ret = -EINVAL; @@ -1808,8 +1901,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, mutex_lock(&q->blk_trace_mutex); + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); if (attr == &dev_attr_enable) { - if (!!value == !!q->blk_trace) { + if (!!value == !!bt) { ret = 0; goto out_unlock_bdev; } @@ -1821,18 +1916,21 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, } ret = 0; - if (q->blk_trace == NULL) + if (bt == NULL) { ret = blk_trace_setup_queue(q, bdev); + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); + } if (ret == 0) { if 
(attr == &dev_attr_act_mask) - q->blk_trace->act_mask = value; + bt->act_mask = value; else if (attr == &dev_attr_pid) - q->blk_trace->pid = value; + bt->pid = value; else if (attr == &dev_attr_start_lba) - q->blk_trace->start_lba = value; + bt->start_lba = value; else if (attr == &dev_attr_end_lba) - q->blk_trace->end_lba = value; + bt->end_lba = value; } out_unlock_bdev: diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 04b5e4cef44de25cc940e9909b38c025c9bae138..9e93ef4969e7508359b0f07bc14e44657f6476c6 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -7677,6 +7677,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size) */ allocate_snapshot = false; #endif + + /* + * Because of some magic with the way alloc_percpu() works on + * x86_64, we need to synchronize the pgd of all the tables, + * otherwise the trace events that happen in x86_64 page fault + * handlers can't cope with accessing the chance that a + * alloc_percpu()'d memory might be touched in the page fault trace + * event. Oh, and we need to audit all other alloc_percpu() and vmalloc() + * calls in tracing, because something might get triggered within a + * page fault trace event! + */ + vmalloc_sync_mappings(); + return 0; } diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 6fb5eb7b57dc03eb5b7de6c4253e7ea8b2d89a0c..13f5013373085dfb59eed397f3d313ab7ced92d9 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -223,11 +223,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file) static int trigger_process_regex(struct trace_event_file *file, char *buff) { - char *command, *next = buff; + char *command, *next; struct event_command *p; int ret = -EINVAL; + next = buff = skip_spaces(buff); command = strsep(&next, ": \t"); + if (next) { + next = skip_spaces(next); + if (!*next) + next = NULL; + } command = (command[0] != '!') ? command : command + 1; mutex_lock(&trigger_cmd_mutex); @@ -630,8 +636,14 @@ event_trigger_callback(struct event_command *cmd_ops, int ret; /* separate the trigger from the filter (t:n [if filter]) */ - if (param && isdigit(param[0])) + if (param && isdigit(param[0])) { trigger = strsep(¶m, " \t"); + if (param) { + param = skip_spaces(param); + if (!*param) + param = NULL; + } + } trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger); @@ -1342,6 +1354,11 @@ int event_enable_trigger_func(struct event_command *cmd_ops, trigger = strsep(¶m, " \t"); if (!trigger) return -EINVAL; + if (param) { + param = skip_spaces(param); + if (!*param) + param = NULL; + } system = strsep(&trigger, ":"); if (!trigger) diff --git a/kernel/umh.c b/kernel/umh.c index 6ff9905250ff0563713c64585a74e888de70f343..a5daa8534d0ed2ce7b0e4e0c3cd3e1a94e4a88e3 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -404,6 +404,11 @@ EXPORT_SYMBOL(call_usermodehelper_setup); * Runs a user-space application. The application is started * asynchronously if wait is not set, and runs as a child of system workqueues. * (ie. it runs with full root capabilities and optimized affinity). + * + * Note: successful return value does not guarantee the helper was called at + * all. You can't rely on sub_info->{init,cleanup} being called even for + * UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers + * into a successful no-op. 
*/ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) { diff --git a/lib/Makefile b/lib/Makefile index ec649b0b711e1837a2a6ffaf2953c04a3a4205f5..f29a816df3ecaee29abbefd7ae91293dd48e0e7e 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -262,6 +262,8 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o obj-$(CONFIG_UBSAN) += ubsan.o UBSAN_SANITIZE_ubsan.o := n +KASAN_SANITIZE_ubsan.o := n +CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN) obj-$(CONFIG_SBITMAP) += sbitmap.o diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index e01b705556aa68fb156d4e481c07c9677dde83d8..6c5229f98c9eb409370cd5d58b7afae8aa4cabf1 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -671,7 +671,7 @@ do { \ ************** MIPS/64 ************** ***************************************/ #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 -#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 +#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC) /* * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C * code below, so we special case MIPS64r6 until the compiler can do better. diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index ee4fa039074019d035294ff666dec8141555ba93..56a5981ba968f31020fc73d442933f68aa8c032b 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c @@ -30,13 +30,6 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; unsigned long res = 0; - /* - * Truncate 'max' to the user-specified limit, so that - * we only have one limit we need to check in the loop - */ - if (max > count) - max = count; - if (IS_UNALIGNED(src, dst)) goto byte_at_a_time; @@ -114,12 +107,20 @@ long strncpy_from_user(char *dst, const char __user *src, long count) unsigned long max = max_addr - src_addr; long retval; + /* + * Truncate 'max' to the user-specified limit, so that + * we only have one limit we need to check in the loop + */ + if (max > count) + max = count; + kasan_check_write(dst, count); check_object_size(dst, count, false); - user_access_begin(); - retval = do_strncpy_from_user(dst, src, count, max); - user_access_end(); - return retval; + if (user_access_begin(VERIFY_READ, src, max)) { + retval = do_strncpy_from_user(dst, src, count, max); + user_access_end(); + return retval; + } } return -EFAULT; } diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 1cf563cd6bf45f76a459e82cf06c817971b67d87..37628e7f6825fc8341e49ef402b45c861a5b96aa 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -32,13 +32,6 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long align, res = 0; unsigned long c; - /* - * Truncate 'max' to the user-specified limit, so that - * we only have one limit we need to check in the loop - */ - if (max > count) - max = count; - /* * Do everything aligned. But that means that we * need to also expand the maximum.. 
@@ -115,10 +108,18 @@ long strnlen_user(const char __user *str, long count) unsigned long max = max_addr - src_addr; long retval; - user_access_begin(); - retval = do_strnlen_user(str, count, max); - user_access_end(); - return retval; + /* + * Truncate 'max' to the user-specified limit, so that + * we only have one limit we need to check in the loop + */ + if (max > count) + max = count; + + if (user_access_begin(VERIFY_READ, str, max)) { + retval = do_strnlen_user(str, count, max); + user_access_end(); + return retval; + } } return 0; } diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c index 2c13ecc5bb2c7ea7563e7096a61b93d13275d675..ed1f3df27260204943d6c51efcb7b4aba1fdb220 100644 --- a/lib/zlib_inflate/inffast.c +++ b/lib/zlib_inflate/inffast.c @@ -10,17 +10,6 @@ #ifndef ASMINF -/* Allow machine dependent optimization for post-increment or pre-increment. - Based on testing to date, - Pre-increment preferred for: - - PowerPC G3 (Adler) - - MIPS R5000 (Randers-Pehrson) - Post-increment preferred for: - - none - No measurable difference: - - Pentium III (Anderson) - - M68060 (Nikl) - */ union uu { unsigned short us; unsigned char b[2]; @@ -38,16 +27,6 @@ get_unaligned16(const unsigned short *p) return mm.us; } -#ifdef POSTINC -# define OFF 0 -# define PUP(a) *(a)++ -# define UP_UNALIGNED(a) get_unaligned16((a)++) -#else -# define OFF 1 -# define PUP(a) *++(a) -# define UP_UNALIGNED(a) get_unaligned16(++(a)) -#endif - /* Decode literal, length, and distance codes and write out the resulting literal and match bytes until either not enough input or output is @@ -115,9 +94,9 @@ void inflate_fast(z_streamp strm, unsigned start) /* copy state to local variables */ state = (struct inflate_state *)strm->state; - in = strm->next_in - OFF; + in = strm->next_in; last = in + (strm->avail_in - 5); - out = strm->next_out - OFF; + out = strm->next_out; beg = out - (start - strm->avail_out); end = out + (strm->avail_out - 257); #ifdef INFLATE_STRICT @@ -138,9 +117,9 @@ void inflate_fast(z_streamp strm, unsigned start) input data or output space */ do { if (bits < 15) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } this = lcode[hold & lmask]; @@ -150,14 +129,14 @@ void inflate_fast(z_streamp strm, unsigned start) bits -= op; op = (unsigned)(this.op); if (op == 0) { /* literal */ - PUP(out) = (unsigned char)(this.val); + *out++ = (unsigned char)(this.val); } else if (op & 16) { /* length base */ len = (unsigned)(this.val); op &= 15; /* number of extra bits */ if (op) { if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } len += (unsigned)hold & ((1U << op) - 1); @@ -165,9 +144,9 @@ void inflate_fast(z_streamp strm, unsigned start) bits -= op; } if (bits < 15) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } this = dcode[hold & dmask]; @@ -180,10 +159,10 @@ void inflate_fast(z_streamp strm, unsigned start) dist = (unsigned)(this.val); op &= 15; /* number of extra bits */ if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } } @@ -205,13 +184,13 @@ void 
inflate_fast(z_streamp strm, unsigned start) state->mode = BAD; break; } - from = window - OFF; + from = window; if (write == 0) { /* very common case */ from += wsize - op; if (op < len) { /* some from window */ len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } @@ -222,14 +201,14 @@ void inflate_fast(z_streamp strm, unsigned start) if (op < len) { /* some from end of window */ len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); - from = window - OFF; + from = window; if (write < len) { /* some from start of window */ op = write; len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } @@ -240,21 +219,21 @@ void inflate_fast(z_streamp strm, unsigned start) if (op < len) { /* some from window */ len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } while (len > 2) { - PUP(out) = PUP(from); - PUP(out) = PUP(from); - PUP(out) = PUP(from); + *out++ = *from++; + *out++ = *from++; + *out++ = *from++; len -= 3; } if (len) { - PUP(out) = PUP(from); + *out++ = *from++; if (len > 1) - PUP(out) = PUP(from); + *out++ = *from++; } } else { @@ -264,29 +243,29 @@ void inflate_fast(z_streamp strm, unsigned start) from = out - dist; /* copy direct from output */ /* minimum length is three */ /* Align out addr */ - if (!((long)(out - 1 + OFF) & 1)) { - PUP(out) = PUP(from); + if (!((long)(out - 1) & 1)) { + *out++ = *from++; len--; } - sout = (unsigned short *)(out - OFF); + sout = (unsigned short *)(out); if (dist > 2) { unsigned short *sfrom; - sfrom = (unsigned short *)(from - OFF); + sfrom = (unsigned short *)(from); loops = len >> 1; do #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS - PUP(sout) = PUP(sfrom); + *sout++ = *sfrom++; #else - PUP(sout) = UP_UNALIGNED(sfrom); + *sout++ = get_unaligned16(sfrom++); #endif while (--loops); - out = (unsigned char *)sout + OFF; - from = (unsigned char *)sfrom + OFF; + out = (unsigned char *)sout; + from = (unsigned char *)sfrom; } else { /* dist == 1 or dist == 2 */ unsigned short pat16; - pat16 = *(sout-1+OFF); + pat16 = *(sout-1); if (dist == 1) { union uu mm; /* copy one char pattern to both bytes */ @@ -296,12 +275,12 @@ void inflate_fast(z_streamp strm, unsigned start) } loops = len >> 1; do - PUP(sout) = pat16; + *sout++ = pat16; while (--loops); - out = (unsigned char *)sout + OFF; + out = (unsigned char *)sout; } if (len & 1) - PUP(out) = PUP(from); + *out++ = *from++; } } else if ((op & 64) == 0) { /* 2nd level distance code */ @@ -336,8 +315,8 @@ void inflate_fast(z_streamp strm, unsigned start) hold &= (1U << bits) - 1; /* update state and return */ - strm->next_in = in + OFF; - strm->next_out = out + OFF; + strm->next_in = in; + strm->next_out = out; strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); strm->avail_out = (unsigned)(out < end ? 
257 + (end - out) : 257 - (out - end)); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 8847f84da129b3330f25287c330c822a91edeff6..bd229a949abd63a28a70c56e9d3ace72336b05aa 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2193,6 +2193,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, spinlock_t *ptl; struct mm_struct *mm = vma->vm_mm; unsigned long haddr = address & HPAGE_PMD_MASK; + bool was_locked = false; + pmd_t _pmd; mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE); ptl = pmd_lock(mm, pmd); @@ -2202,11 +2204,32 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, * pmd against. Otherwise we can end up replacing wrong page. */ VM_BUG_ON(freeze && !page); - if (page && page != pmd_page(*pmd)) - goto out; + if (page) { + VM_WARN_ON_ONCE(!PageLocked(page)); + was_locked = true; + if (page != pmd_page(*pmd)) + goto out; + } +repeat: if (pmd_trans_huge(*pmd)) { - page = pmd_page(*pmd); + if (!page) { + page = pmd_page(*pmd); + if (unlikely(!trylock_page(page))) { + get_page(page); + _pmd = *pmd; + spin_unlock(ptl); + lock_page(page); + spin_lock(ptl); + if (unlikely(!pmd_same(*pmd, _pmd))) { + unlock_page(page); + put_page(page); + page = NULL; + goto repeat; + } + put_page(page); + } + } if (PageMlocked(page)) clear_page_mlock(page); } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) @@ -2214,6 +2237,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, __split_huge_pmd_locked(vma, pmd, haddr, freeze); out: spin_unlock(ptl); + if (!was_locked && page) + unlock_page(page); mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 082ac42cca234d259e9f932d23e7e4910f61af3a..6c6c0a981a911dbcc424162f4aa8836adedd3f42 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4928,7 +4928,6 @@ static void __mem_cgroup_clear_mc(void) if (!mem_cgroup_is_root(mc.to)) page_counter_uncharge(&mc.to->memory, mc.moved_swap); - mem_cgroup_id_get_many(mc.to, mc.moved_swap); css_put_many(&mc.to->css, mc.moved_swap); mc.moved_swap = 0; @@ -5119,7 +5118,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, ent = target.ent; if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { mc.precharge--; - /* we fixup refcnts and charges later. */ + mem_cgroup_id_get_many(mc.to, 1); + /* we fixup other refcnts and charges later. 
*/ mc.moved_swap++; } break; diff --git a/mm/mremap.c b/mm/mremap.c index d18f8429596f6a81e99b2bc1b83012e15f1db77f..5651928db60d9b173405ba6e76c37c9683eee95a 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -223,7 +223,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma, new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); if (!new_pmd) break; - if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) { + if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) { if (extent == HPAGE_PMD_SIZE) { bool moved; /* See comment in move_ptes() */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 496ad8492c92c29660b8e0cc9071990be2c94772..87fa2336b76e958c295827797d38551da8a4b887 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1480,6 +1480,7 @@ void set_zone_contiguous(struct zone *zone) if (!__pageblock_pfn_to_page(block_start_pfn, block_end_pfn, zone)) return; + cond_resched(); } /* We confirm that there is no hole */ diff --git a/mm/shmem.c b/mm/shmem.c index 05399bbc865e9b4db126646005f4e75bd0aa2c47..419d09886a9ce13b39ed84a3b27f9588ea1907c9 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2132,7 +2132,11 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user) struct shmem_inode_info *info = SHMEM_I(inode); int retval = -ENOMEM; - spin_lock_irq(&info->lock); + /* + * What serializes the accesses to info->flags? + * ipc_lock_object() when called from shmctl_do_lock(), + * no serialization needed when called from shm_destroy(). + */ if (lock && !(info->flags & VM_LOCKED)) { if (!user_shm_lock(inode->i_size, user)) goto out_nomem; @@ -2147,7 +2151,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user) retval = 0; out_nomem: - spin_unlock_irq(&info->lock); return retval; } diff --git a/mm/slab_common.c b/mm/slab_common.c index 20da89561fd2363ceff61c4b27fda157a78df85e..7fe72acfa501cd27370c49cad3221fb6f1419224 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1470,7 +1470,7 @@ void kzfree(const void *p) if (unlikely(ZERO_OR_NULL_PTR(mem))) return; ks = ksize(mem); - memset(mem, 0, ks); + memzero_explicit(mem, ks); kfree(mem); } EXPORT_SYMBOL(kzfree); diff --git a/mm/slub.c b/mm/slub.c index fd5f9337bc7a8accbfae91170915302fded168de..f950312acfdee3d1993ae63846a9ec4516dc688d 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -665,6 +665,20 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...) va_end(args); } +static bool freelist_corrupted(struct kmem_cache *s, struct page *page, + void *freelist, void *nextfree) +{ + if ((s->flags & SLAB_CONSISTENCY_CHECKS) && + !check_valid_pointer(s, page, nextfree)) { + object_err(s, page, freelist, "Freechain corrupt"); + freelist = NULL; + slab_fix(s, "Isolate corrupted freechain"); + return true; + } + + return false; +} + static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) { unsigned int off; /* Offset of last byte */ @@ -1374,6 +1388,11 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) {} +static bool freelist_corrupted(struct kmem_cache *s, struct page *page, + void *freelist, void *nextfree) +{ + return false; +} #endif /* CONFIG_SLUB_DEBUG */ /* @@ -2107,6 +2126,14 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, void *prior; unsigned long counters; + /* + * If 'nextfree' is invalid, it is possible that the object at + * 'freelist' is already corrupted. So isolate all objects + * starting at 'freelist'. 
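On the kzfree() change above, a short illustrative note (not from the patch) on why memzero_explicit() is used: a plain memset() immediately before kfree() can be treated as a dead store and dropped by the compiler, while memzero_explicit() ends with barrier_data() so the clearing stores survive:

static void example_wipe_and_free(void *secret, size_t len)
{
	/* memset(secret, 0, len) here could legally be optimized away */
	memzero_explicit(secret, len);	/* barrier_data() keeps the stores */
	kfree(secret);
}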
+ */ + if (freelist_corrupted(s, page, freelist, nextfree)) + break; + do { prior = page->freelist; counters = page->counters; @@ -5684,7 +5711,8 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s) */ if (buffer) buf = buffer; - else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf)) + else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf) && + !IS_ENABLED(CONFIG_SLUB_STATS)) buf = mbuf; else { buffer = (char *) get_zeroed_page(GFP_KERNEL); @@ -5837,8 +5865,10 @@ static int sysfs_slab_add(struct kmem_cache *s) s->kobj.kset = kset; err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); - if (err) + if (err) { + kobject_put(&s->kobj); goto out; + } err = sysfs_create_group(&s->kobj, &slab_attr_group); if (err) diff --git a/mm/swap_state.c b/mm/swap_state.c index f8b76b0b2639130d6a8a63554dafffab476778d6..affc610ac7ba1aca82abe010e3ea8b5e5d2406fd 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -23,6 +23,7 @@ #include #include +#include "internal.h" /* * swapper_space is a fiction, retained to simplify the path through @@ -414,7 +415,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, /* * call radix_tree_preload() while we can wait. */ - err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL); + err = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK); if (err) break; diff --git a/mm/util.c b/mm/util.c index b8e849125eccdc49eb135f3949f94c4f5c0d766a..44ffa7caed6eafbb877e69f58f9a3635555d8186 100644 --- a/mm/util.c +++ b/mm/util.c @@ -419,6 +419,24 @@ void kvfree(const void *addr) } EXPORT_SYMBOL(kvfree); +/** + * kvfree_sensitive - Free a data object containing sensitive information. + * @addr: address of the data object to be freed. + * @len: length of the data object. + * + * Use the special memzero_explicit() function to clear the content of a + * kvmalloc'ed object containing sensitive data to make sure that the + * compiler won't optimize out the data clearing. 
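A minimal usage sketch for the kvfree_sensitive() helper documented above (illustrative; example_use_key() is a hypothetical consumer of the buffer):

static int example_handle_key(const void *payload, size_t len)
{
	void *key = kvmalloc(len, GFP_KERNEL);
	int err;

	if (!key)
		return -ENOMEM;

	memcpy(key, payload, len);
	err = example_use_key(key, len);	/* hypothetical user of the key */

	/* wipes the buffer, then kvfree()s it; NULL/ZERO_SIZE_PTR are no-ops */
	kvfree_sensitive(key, len);
	return err;
}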
+ */ +void kvfree_sensitive(const void *addr, size_t len) +{ + if (likely(!ZERO_OR_NULL_PTR(addr))) { + memzero_explicit((void *)addr, len); + kvfree(addr); + } +} +EXPORT_SYMBOL(kvfree_sensitive); + static inline void *__page_rmapping(struct page *page) { unsigned long mapping; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 63fa6ea2341e5f103ab6a3ba58a6e966c5af91ff..89d074ce10fc62f5925891c7ad81de3f6e501bc7 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -639,8 +639,10 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case SO_BINDTODEVICE: - if (optlen > IFNAMSIZ) - optlen = IFNAMSIZ; + if (optlen > IFNAMSIZ - 1) + optlen = IFNAMSIZ - 1; + + memset(devname, 0, sizeof(devname)); if (copy_from_user(devname, optval, optlen)) { res = -EFAULT; diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index cec31769bb3fc8e1d2814e71b0540f78e92ef902..f0abbbdafe07f9eccad7db1638c75c4e848ff097 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -734,7 +734,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig); if (!orig_node) - return; + goto out; neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming, ethhdr->h_source); diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 7a7dcac20566752627ba05627b6b5bc4df0e058a..7aacec24958eb36e0bb4cf17b09265f280cda03b 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -1017,15 +1017,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv, */ static u8 batadv_nc_random_weight_tq(u8 tq) { - u8 rand_val, rand_tq; - - get_random_bytes(&rand_val, sizeof(rand_val)); - /* randomize the estimated packet loss (max TQ - estimated TQ) */ - rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq); - - /* normalize the randomized packet loss */ - rand_tq /= BATADV_TQ_MAX_VALUE; + u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq); /* convert to (randomized) estimated tq again */ return BATADV_TQ_MAX_VALUE - rand_tq; diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index 2ef9b136fc394de1d0e5275debbf7bc8a6da2c5c..ed789845d195c113955629b079187f0adf522b30 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c @@ -1081,7 +1081,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj, ret = batadv_parse_throughput(net_dev, buff, "throughput_override", &tp_override); if (!ret) - return count; + goto out; old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override); if (old_tp_override == tp_override) @@ -1114,6 +1114,7 @@ static ssize_t batadv_show_throughput_override(struct kobject *kobj, tp_override = atomic_read(&hard_iface->bat_v.throughput_override); + batadv_hardif_put(hard_iface); return sprintf(buff, "%u.%u MBit\n", tp_override / 10, tp_override % 10); } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 363dc85bbc5c9edea164020c1dd3ff51a777fac6..56e4ae7d7f638c78485718d7ddde18c7849f07b0 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -3775,6 +3775,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, case 0x11: /* Unsupported Feature or Parameter Value */ case 0x1c: /* SCO interval rejected */ case 0x1a: /* Unsupported Remote Feature */ + case 0x1e: /* Invalid LMP Parameters */ case 0x1f: /* Unspecified error */ case 0x20: /* Unsupported LMP Parameter value */ if (conn->out) { diff --git 
a/net/bridge/br_private.h b/net/bridge/br_private.h index e870cfc85b1476e0cd9456a51e6ee161d962d4f3..14ff034e561c5118980817965c4481a0336ff081 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -190,8 +190,8 @@ struct net_bridge_port_group { struct rcu_head rcu; struct timer_list timer; struct br_ip addr; + unsigned char eth_addr[ETH_ALEN] __aligned(2); unsigned char flags; - unsigned char eth_addr[ETH_ALEN]; }; struct net_bridge_mdb_entry diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index b09ec869c913e11e90eeb2f1347f50bf98a49108..0ab7688bb72491e08c7f0860a652103be6de2011 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -34,6 +34,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb, ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source); eth->h_proto = eth_hdr(oldskb)->h_proto; skb_pull(nskb, ETH_HLEN); + + if (skb_vlan_tag_present(oldskb)) { + u16 vid = skb_vlan_tag_get(oldskb); + + __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid); + } } static int nft_bridge_iphdr_validate(struct sk_buff *skb) diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 92b2641ab93b8d21d36fc72989ad4ec8a739adb2..b026128a89d76631460a1658a670ac5b8e0e0ba1 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -384,6 +384,7 @@ static void target_copy(struct ceph_osd_request_target *dest, dest->size = src->size; dest->min_size = src->min_size; dest->sort_bitwise = src->sort_bitwise; + dest->recovery_deletes = src->recovery_deletes; dest->flags = src->flags; dest->paused = src->paused; @@ -3444,7 +3445,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) * supported. */ req->r_t.target_oloc.pool = m.redirect.oloc.pool; - req->r_flags |= CEPH_OSD_FLAG_REDIRECTED; + req->r_flags |= CEPH_OSD_FLAG_REDIRECTED | + CEPH_OSD_FLAG_IGNORE_OVERLAY | + CEPH_OSD_FLAG_IGNORE_CACHE; req->r_tid = 0; __submit_request(req, false); goto out_unlock_osdc; diff --git a/net/core/dev.c b/net/core/dev.c index e8d1af5f95007dedbaea62d0856078442492329a..4af3fa399e229a1433c473fd749919910993e116 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -83,6 +83,7 @@ #include #include #include +#include #include #include #include @@ -196,7 +197,7 @@ static DEFINE_SPINLOCK(napi_hash_lock); static unsigned int napi_gen_id = NR_CPUS; static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); -static seqcount_t devnet_rename_seq; +static DECLARE_RWSEM(devnet_rename_sem); static inline void dev_base_seq_inc(struct net *net) { @@ -900,33 +901,28 @@ EXPORT_SYMBOL(dev_get_by_napi_id); * @net: network namespace * @name: a pointer to the buffer where the name will be stored. * @ifindex: the ifindex of the interface to get the name from. - * - * The use of raw_seqcount_begin() and cond_resched() before - * retrying is required as we want to give the writers a chance - * to complete when CONFIG_PREEMPT is not set. 
*/ int netdev_get_name(struct net *net, char *name, int ifindex) { struct net_device *dev; - unsigned int seq; + int ret; -retry: - seq = raw_seqcount_begin(&devnet_rename_seq); + down_read(&devnet_rename_sem); rcu_read_lock(); + dev = dev_get_by_index_rcu(net, ifindex); if (!dev) { - rcu_read_unlock(); - return -ENODEV; + ret = -ENODEV; + goto out; } strcpy(name, dev->name); - rcu_read_unlock(); - if (read_seqcount_retry(&devnet_rename_seq, seq)) { - cond_resched(); - goto retry; - } - return 0; + ret = 0; +out: + rcu_read_unlock(); + up_read(&devnet_rename_sem); + return ret; } /** @@ -1191,10 +1187,10 @@ int dev_change_name(struct net_device *dev, const char *newname) if (dev->flags & IFF_UP) return -EBUSY; - write_seqcount_begin(&devnet_rename_seq); + down_write(&devnet_rename_sem); if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { - write_seqcount_end(&devnet_rename_seq); + up_write(&devnet_rename_sem); return 0; } @@ -1202,7 +1198,7 @@ int dev_change_name(struct net_device *dev, const char *newname) err = dev_get_valid_name(net, dev, newname); if (err < 0) { - write_seqcount_end(&devnet_rename_seq); + up_write(&devnet_rename_sem); return err; } @@ -1217,11 +1213,11 @@ int dev_change_name(struct net_device *dev, const char *newname) if (ret) { memcpy(dev->name, oldname, IFNAMSIZ); dev->name_assign_type = old_assign_type; - write_seqcount_end(&devnet_rename_seq); + up_write(&devnet_rename_sem); return ret; } - write_seqcount_end(&devnet_rename_seq); + up_write(&devnet_rename_sem); netdev_adjacent_rename_links(dev, oldname); @@ -1242,7 +1238,7 @@ int dev_change_name(struct net_device *dev, const char *newname) /* err >= 0 after dev_alloc_name() or stores the first errno */ if (err >= 0) { err = ret; - write_seqcount_begin(&devnet_rename_seq); + down_write(&devnet_rename_sem); memcpy(dev->name, oldname, IFNAMSIZ); memcpy(oldname, newname, IFNAMSIZ); dev->name_assign_type = old_assign_type; @@ -7346,11 +7342,13 @@ static void netdev_sync_lower_features(struct net_device *upper, netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", &feature, lower->name); lower->wanted_features &= ~feature; - netdev_update_features(lower); + __netdev_update_features(lower); if (unlikely(lower->features & feature)) netdev_WARN(upper, "failed to disable %pNF on %s!\n", &feature, lower->name); + else + netdev_features_change(lower); } } } @@ -7772,6 +7770,13 @@ int register_netdevice(struct net_device *dev) rcu_barrier(); dev->reg_state = NETREG_UNREGISTERED; + /* We should put the kobject that hold in + * netdev_unregister_kobject(), otherwise + * the net device cannot be freed when + * driver calls free_netdev(), because the + * kobject is being hold. 
+ */ + kobject_put(&dev->dev.kobj); } /* * Prevent userspace races by waiting until the network diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 70ccda233bd1f1aab18535e6d9d0419bb9a1a23b..ef9fe5f95093b61e3dba7d44d6f55873b03e1fdc 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -154,6 +154,7 @@ static void sched_send_work(unsigned long _data) static void trace_drop_common(struct sk_buff *skb, void *location) { struct net_dm_alert_msg *msg; + struct net_dm_drop_point *point; struct nlmsghdr *nlh; struct nlattr *nla; int i; @@ -172,11 +173,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location) nlh = (struct nlmsghdr *)dskb->data; nla = genlmsg_data(nlmsg_data(nlh)); msg = nla_data(nla); + point = msg->points; for (i = 0; i < msg->entries; i++) { - if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { - msg->points[i].count++; + if (!memcmp(&location, &point->pc, sizeof(void *))) { + point->count++; goto out; } + point++; } if (msg->entries == dm_hit_limit) goto out; @@ -185,8 +188,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location) */ __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point)); nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point)); - memcpy(msg->points[msg->entries].pc, &location, sizeof(void *)); - msg->points[msg->entries].count = 1; + memcpy(point->pc, &location, sizeof(void *)); + point->count = 1; msg->entries++; if (!timer_pending(&data->send_timer)) { diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 1c4810919a0a35900d45a659de0cd780b7e500d3..8699016749ce386ee6519e8d1cc4fda497e99613 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -241,6 +241,8 @@ static void net_prio_attach(struct cgroup_taskset *tset) struct task_struct *p; struct cgroup_subsys_state *css; + cgroup_sk_alloc_disable(); + cgroup_taskset_for_each(p, css, tset) { void *v = (void *)(unsigned long)css->cgroup->id; diff --git a/net/core/sock.c b/net/core/sock.c index bed67e70ec1e936614b2fcfec8e0cdd908700740..319155ca5c7b3d9032b452ef7dea0abc1e0953c6 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1538,6 +1538,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, cgroup_sk_alloc(&sk->sk_cgrp_data); sock_update_classid(&sk->sk_cgrp_data); sock_update_netprioidx(&sk->sk_cgrp_data); + sk_tx_queue_clear(sk); } return sk; @@ -1688,7 +1689,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) /* sk->sk_memcg will be populated at accept() time */ newsk->sk_memcg = NULL; - cgroup_sk_alloc(&newsk->sk_cgrp_data); + cgroup_sk_clone(&newsk->sk_cgrp_data); rcu_read_lock(); filter = rcu_dereference(sk->sk_filter); @@ -1740,6 +1741,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) */ sk_refcnt_debug_inc(newsk); sk_set_socket(newsk, NULL); + sk_tx_queue_clear(newsk); newsk->sk_wq = NULL; if (newsk->sk_prot->sockets_allocated) diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 58a401e9cf09db8336aaa780d10f8e008e95c8a5..b438bed6749d47b9103c3c1cea1d1d7056e5c2fc 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -211,7 +211,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); dst = NULL; @@ -282,7 +282,7 @@ static void dccp_v6_ctl_send_reset(const struct 
sock *sk, struct sk_buff *rxskb) security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6)); /* sk = NULL, but it is safe for now. RST socket required. */ - dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); + dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); if (!IS_ERR(dst)) { skb_dst_set(skb, dst); ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0); @@ -912,7 +912,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); final_p = fl6_update_dst(&fl6, opt, &final); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto failure; diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 1c21dc5d6dd4049c47f947ac727516500d534ff2..5535b722f66d67fb43e44124343e80283f613621 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -1272,7 +1272,8 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, return ret_val; } - secattr->flags |= NETLBL_SECATTR_MLS_CAT; + if (secattr->attr.mls.cat) + secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; @@ -1453,7 +1454,8 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, return ret_val; } - secattr->flags |= NETLBL_SECATTR_MLS_CAT; + if (secattr->attr.mls.cat) + secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 7d9165beca8ac32c289ff84c73ba46583dab2fbe..2c339995827cd828b779e233f3ff25e0b93a40d2 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -262,6 +262,7 @@ static struct in_device *inetdev_init(struct net_device *dev) err = devinet_sysctl_register(in_dev); if (err) { in_dev->dead = 1; + neigh_parms_release(&arp_tbl, in_dev->arp_parms); in_dev_put(in_dev); in_dev = NULL; goto out; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index eff703cb13b6f26c8df9117c071d0b7fc37e0084..bc233fdfae0fe124438334823bb23f1b71c80f1b 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -839,7 +839,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, if (fl4.flowi4_scope < RT_SCOPE_LINK) fl4.flowi4_scope = RT_SCOPE_LINK; - if (cfg->fc_table) + if (cfg->fc_table && cfg->fc_table != RT_TABLE_MAIN) tbl = fib_get_table(net, cfg->fc_table); if (tbl) diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index f6793017a20d95f6af3c889256008459d50342b7..44cc17c43a6b5825111e3a6d1eb7c4406948f76b 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -98,9 +98,10 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, __be32 remote, __be32 local, __be32 key) { - unsigned int hash; struct ip_tunnel *t, *cand = NULL; struct hlist_head *head; + struct net_device *ndev; + unsigned int hash; hash = ip_tunnel_hash(key, remote); head = &itn->tunnels[hash]; @@ -175,8 +176,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, if (t && t->dev->flags & IFF_UP) return t; - if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP) - return netdev_priv(itn->fb_tunnel_dev); + ndev = READ_ONCE(itn->fb_tunnel_dev); + if (ndev && ndev->flags & IFF_UP) + return netdev_priv(ndev); return NULL; } @@ -1211,9 +1213,9 @@ void ip_tunnel_uninit(struct net_device *dev) struct ip_tunnel_net *itn; itn = net_generic(net, tunnel->ip_tnl_net_id); - /* fb_tunnel_dev will be unregisted in net-exit call. 
*/ - if (itn->fb_tunnel_dev != dev) - ip_tunnel_del(itn, netdev_priv(dev)); + ip_tunnel_del(itn, netdev_priv(dev)); + if (itn->fb_tunnel_dev == dev) + WRITE_ONCE(itn->fb_tunnel_dev, NULL); dst_cache_reset(&tunnel->dst_cache); } diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index c1693d75e19687f47c9c5cef79fb44fb6b5fb94c..33a85269a9f2615f6c26d324fb0950fd6e4a6f0f 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -50,7 +50,7 @@ static unsigned int vti_net_id __read_mostly; static int vti_tunnel_init(struct net_device *dev); static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi, - int encap_type) + int encap_type, bool update_skb_dev) { struct ip_tunnel *tunnel; const struct iphdr *iph = ip_hdr(skb); @@ -65,6 +65,9 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi, XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; + if (update_skb_dev) + skb->dev = tunnel->dev; + return xfrm_input(skb, nexthdr, spi, encap_type); } @@ -74,25 +77,43 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi, return 0; } -static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi, - int encap_type) +static int vti_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi, + int encap_type) { - struct ip_tunnel *tunnel; + return vti_input(skb, nexthdr, spi, encap_type, false); +} + +static int vti_rcv(struct sk_buff *skb, __be32 spi, bool update_skb_dev) +{ + XFRM_SPI_SKB_CB(skb)->family = AF_INET; + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); + + return vti_input(skb, ip_hdr(skb)->protocol, spi, 0, update_skb_dev); +} + +static int vti_rcv_proto(struct sk_buff *skb) +{ + return vti_rcv(skb, 0, false); +} + +static int vti_rcv_tunnel(struct sk_buff *skb) +{ + struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id); const struct iphdr *iph = ip_hdr(skb); - struct net *net = dev_net(skb->dev); - struct ip_tunnel_net *itn = net_generic(net, vti_net_id); + struct ip_tunnel *tunnel; tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, iph->saddr, iph->daddr, 0); if (tunnel) { + struct tnl_ptk_info tpi = { + .proto = htons(ETH_P_IP), + }; + if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto drop; - - XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; - - skb->dev = tunnel->dev; - - return xfrm_input(skb, nexthdr, spi, encap_type); + if (iptunnel_pull_header(skb, 0, tpi.proto, false)) + goto drop; + return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false); } return -EINVAL; @@ -101,22 +122,6 @@ static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi, return 0; } -static int vti_rcv(struct sk_buff *skb) -{ - XFRM_SPI_SKB_CB(skb)->family = AF_INET; - XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); - - return vti_input(skb, ip_hdr(skb)->protocol, 0, 0); -} - -static int vti_rcv_ipip(struct sk_buff *skb) -{ - XFRM_SPI_SKB_CB(skb)->family = AF_INET; - XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); - - return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0); -} - static int vti_rcv_cb(struct sk_buff *skb, int err) { unsigned short family; @@ -482,31 +487,31 @@ static void __net_init vti_fb_tunnel_init(struct net_device *dev) } static struct xfrm4_protocol vti_esp4_protocol __read_mostly = { - .handler = vti_rcv, - .input_handler = vti_input, + .handler = vti_rcv_proto, + .input_handler = vti_input_proto, .cb_handler = vti_rcv_cb, .err_handler = vti4_err, .priority = 100, }; static struct xfrm4_protocol vti_ah4_protocol __read_mostly = { - .handler = vti_rcv, - 
.input_handler = vti_input, + .handler = vti_rcv_proto, + .input_handler = vti_input_proto, .cb_handler = vti_rcv_cb, .err_handler = vti4_err, .priority = 100, }; static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = { - .handler = vti_rcv, - .input_handler = vti_input, + .handler = vti_rcv_proto, + .input_handler = vti_input_proto, .cb_handler = vti_rcv_cb, .err_handler = vti4_err, .priority = 100, }; static struct xfrm_tunnel ipip_handler __read_mostly = { - .handler = vti_rcv_ipip, + .handler = vti_rcv_tunnel, .err_handler = vti4_err, .priority = 0, }; diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index cdd627355ed106ae8228ee4a995f5f3b4588a842..df610245d21a552403d977eb4d619d9dc1f838de 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -702,7 +702,7 @@ static int __init ipip_init(void) rtnl_link_failed: #if IS_ENABLED(CONFIG_MPLS) - xfrm4_tunnel_deregister(&mplsip_handler, AF_INET); + xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS); xfrm_tunnel_mplsip_failed: #endif diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c index 8a69363b48846c628994e54c92a354ca46f71ebc..c66103de86bd1777bd9d4dc409cacf6e60ebf48b 100644 --- a/net/ipv4/netfilter/nf_nat_pptp.c +++ b/net/ipv4/netfilter/nf_nat_pptp.c @@ -165,8 +165,7 @@ pptp_outbound_pkt(struct sk_buff *skb, break; default: pr_debug("unknown outbound packet 0x%04x:%s\n", msg, - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : - pptp_msg_name[0]); + pptp_msg_name(msg)); /* fall through */ case PPTP_SET_LINK_INFO: /* only need to NAT in case PAC is behind NAT box */ @@ -267,9 +266,7 @@ pptp_inbound_pkt(struct sk_buff *skb, pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); break; default: - pr_debug("unknown inbound packet %s\n", - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : - pptp_msg_name[0]); + pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg)); /* fall through */ case PPTP_START_SESSION_REQUEST: case PPTP_START_SESSION_REPLY: diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 16226d49263de714f93d53d0c83cb07482fb2339..186fdf0922d2fc7fe4b06bf435dbf7ac7439c6f8 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -801,6 +801,9 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) inet_sk_flowi_flags(sk), faddr, saddr, 0, 0, sk->sk_uid); + fl4.fl4_icmp_type = user_icmph.type; + fl4.fl4_icmp_code = user_icmph.code; + security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 3c298ec3220027efbfeadd2fc63301ea75df634d..a894adbb6c9b5584b6a1f924d49b2a8323ee8040 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -499,18 +499,16 @@ u32 ip_idents_reserve(u32 hash, int segs) atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; u32 old = ACCESS_ONCE(*p_tstamp); u32 now = (u32)jiffies; - u32 new, delta = 0; + u32 delta = 0; if (old != now && cmpxchg(p_tstamp, old, now) == old) delta = prandom_u32_max(now - old); - /* Do not use atomic_add_return() as it makes UBSAN unhappy */ - do { - old = (u32)atomic_read(p_id); - new = old + delta + segs; - } while (atomic_cmpxchg(p_id, old, new) != old); - - return new - segs; + /* If UBSAN reports an error there, please make sure your compiler + * supports -fno-strict-overflow before reporting it that was a bug + * in UBSAN, and it has been fixed in GCC-8. 
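On the ip_idents_reserve() rewrite above, a small illustrative sketch (not part of the patch) of the reservation semantics a caller relies on: atomic_add_return(segs + delta, p_id) - segs yields the first ID of a contiguous block of 'segs' identifiers, and the random 'delta' leaves an unpredictable gap before the next caller's block:

static void example_fill_ids(__be16 *ids, int segs, u32 hash)
{
	u32 id = ip_idents_reserve(hash, segs);	/* first ID of the block */
	int i;

	for (i = 0; i < segs; i++)
		ids[i] = htons((u16)(id + i));	/* consecutive IDs per segment */
}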
+ */ + return atomic_add_return(segs + delta, p_id) - segs; } EXPORT_SYMBOL(ip_idents_reserve); @@ -921,7 +919,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) /* Check for load limit; set rate_last to the latest sent * redirect. */ - if (peer->rate_tokens == 0 || + if (peer->n_redirects == 0 || time_after(jiffies, (peer->rate_last + (ip_rt_redirect_load << peer->n_redirects)))) { diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 2f871424925eff13a9a42499cbdcf12e45726aa2..68fbf24ba76ba36e43039cd3e212cb6765344bd7 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -390,7 +390,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) /* Try to redo what tcp_v4_send_synack did. */ req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW); - tcp_select_initial_window(tcp_full_space(sk), req->mss, + tcp_select_initial_window(sock_net(sk), tcp_full_space(sk), req->mss, &req->rsk_rcv_wnd, &req->rsk_window_clamp, ireq->wscale_ok, &rcv_wscale, dst_metric(&rt->dst, RTAX_INITRWND)); diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index d3b8ac35fb7b3e45d45c20a779f1386174f5ed24..902ff04951c5e945d01377dbc0928a28228266aa 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -29,7 +29,9 @@ static int zero; static int one = 1; +static int three = 3; static int four = 4; +static int hundred = 100; static int thousand = 1000; static int gso_max_segs = GSO_MAX_SEGS; static int tcp_retr1_max = 255; @@ -205,21 +207,6 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write, return ret; } -/* Validate changes from /proc interface. */ -static int proc_tcp_default_init_rwnd(struct ctl_table *ctl, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - int old_value = *(int *)ctl->data; - int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); - int new_value = *(int *)ctl->data; - - if (write && ret == 0 && (new_value < 3 || new_value > 100)) - *(int *)ctl->data = old_value; - - return ret; -} - static int proc_tcp_congestion_control(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { @@ -747,13 +734,6 @@ static struct ctl_table ipv4_table[] = { .mode = 0444, .proc_handler = proc_tcp_available_ulp, }, - { - .procname = "tcp_default_init_rwnd", - .data = &sysctl_tcp_default_init_rwnd, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_tcp_default_init_rwnd - }, { .procname = "icmp_msgs_per_sec", .data = &sysctl_icmp_msgs_per_sec, @@ -1220,6 +1200,15 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "tcp_default_init_rwnd", + .data = &init_net.ipv4.sysctl_tcp_default_init_rwnd, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &three, + .extra2 = &hundred, + }, { } }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index a74298964f4dc6c971105704e40e51b038cf606b..8bd532f118b814b58d312f3f06aeabbaf79262ac 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1986,13 +1986,15 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, tp->urg_data = 0; tcp_fast_path_check(sk); } - if (used + offset < skb->len) - continue; if (TCP_SKB_CB(skb)->has_rxtstamp) { tcp_update_recv_tstamps(skb, &tss); has_tss = true; } + + if (used + offset < skb->len) + continue; + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto found_fin_ok; if (!(flags & MSG_PEEK)) @@ -2372,6 +2374,9 @@ int tcp_disconnect(struct sock *sk, int flags) 
tp->snd_cwnd_cnt = 0; tp->window_clamp = 0; tp->delivered = 0; + if (icsk->icsk_ca_ops->release) + icsk->icsk_ca_ops->release(sk); + memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); tcp_set_ca_state(sk, TCP_CA_Open); tp->is_sack_reneg = 0; tcp_clear_retrans(tp); @@ -2765,10 +2770,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, #ifdef CONFIG_TCP_MD5SIG case TCP_MD5SIG: case TCP_MD5SIG_EXT: - if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) - err = tp->af_specific->md5_parse(sk, optname, optval, optlen); - else - err = -EINVAL; + err = tp->af_specific->md5_parse(sk, optname, optval, optlen); break; #endif case TCP_USER_TIMEOUT: @@ -3400,10 +3402,13 @@ EXPORT_SYMBOL(tcp_md5_hash_skb_data); int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) { + u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ struct scatterlist sg; - sg_init_one(&sg, key->key, key->keylen); - ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen); + sg_init_one(&sg, key->key, keylen); + ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen); + + /* tcp_md5_do_add() might change key->key under us */ return crypto_ahash_update(hp->md5_req); } EXPORT_SYMBOL(tcp_md5_hash_key); diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 494e3c3a21a11045b4f572257ef4826142d80d3b..755151e95f49711cd225db161c799d9edfc01550 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -199,7 +199,7 @@ static void tcp_reinit_congestion_control(struct sock *sk, icsk->icsk_ca_setsockopt = 1; memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); - if (sk->sk_state != TCP_CLOSE) + if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) tcp_init_congestion_control(sk); } diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 78bfadfcf342695ed2e7f02626ddd1fda7d4d543..8b5ba0a5cd38608b55313d6721719d2d0ef6b055 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -403,6 +403,8 @@ static void hystart_update(struct sock *sk, u32 delay) if (hystart_detect & HYSTART_DELAY) { /* obtain the minimum delay of more than sampling packets */ + if (ca->curr_rtt > delay) + ca->curr_rtt = delay; if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { if (ca->curr_rtt == 0 || ca->curr_rtt > delay) ca->curr_rtt = delay; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 12c8bc5f96f8cd06c0808847a65eebaffe4853ca..00a85567d2ae01388587961173974e1edd33be14 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -95,7 +95,6 @@ int sysctl_tcp_min_rtt_wlen __read_mostly = 300; int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; int sysctl_tcp_early_retrans __read_mostly = 3; int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2; -int sysctl_tcp_default_init_rwnd __read_mostly = TCP_INIT_CWND * 2; #define FLAG_DATA 0x01 /* Incoming frame contained data. */ #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ @@ -421,7 +420,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk) int rcvmem; rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) * - tcp_default_init_rwnd(mss); + tcp_default_init_rwnd(sock_net(sk), mss); /* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency * Allow enough cushion so that sender is not limited by our window @@ -4508,7 +4507,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) { coalesce_done: - tcp_grow_window(sk, skb); + /* For non sack flows, do not grow window to force DUPACK + * and trigger fast retransmit. 
+ */ + if (tcp_is_sack(tp)) + tcp_grow_window(sk, skb); kfree_skb_partial(skb, fragstolen); skb = NULL; goto add_sack; @@ -4592,7 +4595,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) tcp_sack_new_ofo_skb(sk, seq, end_seq); end: if (skb) { - tcp_grow_window(sk, skb); + /* For non sack flows, do not grow window to force DUPACK + * and trigger fast retransmit. + */ + if (tcp_is_sack(tp)) + tcp_grow_window(sk, skb); skb_condense(skb); skb_set_owner_r(skb, sk); } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 4ee8edb7969542895250d2288c4ea65bad80310c..8762416fa53e8003da697205c28422ab92f3e857 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -995,9 +995,18 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen); if (key) { - /* Pre-existing entry - just update that one. */ + /* Pre-existing entry - just update that one. + * Note that the key might be used concurrently. + */ memcpy(key->key, newkey, newkeylen); - key->keylen = newkeylen; + + /* Pairs with READ_ONCE() in tcp_md5_hash_key(). + * Also note that a reader could catch new key->keylen value + * but old key->key[], this is the reason we use __GFP_ZERO + * at sock_kmalloc() time below these lines. + */ + WRITE_ONCE(key->keylen, newkeylen); + return 0; } @@ -1013,7 +1022,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, rcu_assign_pointer(tp->md5sig_info, md5sig); } - key = sock_kmalloc(sk, sizeof(*key), gfp); + key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO); if (!key) return -ENOMEM; if (!tcp_alloc_md5sig_pool()) { @@ -2509,6 +2518,7 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_sack = 1; net->ipv4.sysctl_tcp_window_scaling = 1; net->ipv4.sysctl_tcp_timestamps = 1; + net->ipv4.sysctl_tcp_default_init_rwnd = TCP_INIT_CWND * 2; return 0; fail: diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 61584638dba7fc7d2c1f6c569108400d3237677d..7b4f238cd222de1e02594d97ff5f1a9a98e4282f 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -378,7 +378,7 @@ void tcp_openreq_init_rwin(struct request_sock *req, full_space = rcv_wnd * mss; /* tcp_full_space because it is guaranteed to be the first packet */ - tcp_select_initial_window(full_space, + tcp_select_initial_window(sock_net(sk_listener), full_space, mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), &req->rsk_rcv_wnd, &req->rsk_window_clamp, diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2506960189fe9e24551bdc985d510b123ee3b4c9..00a3164b148d60c61fb2e0fe224d6343002ad45d 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -187,14 +187,14 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, } -u32 tcp_default_init_rwnd(u32 mss) +u32 tcp_default_init_rwnd(struct net *net, u32 mss) { /* Initial receive window should be twice of TCP_INIT_CWND to * enable proper sending of new unsent data during fast recovery * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a * limit when mss is larger than 1460. */ - u32 init_rwnd = sysctl_tcp_default_init_rwnd; + u32 init_rwnd = net->ipv4.sysctl_tcp_default_init_rwnd; if (mss > 1460) init_rwnd = max((1460 * init_rwnd) / mss, 2U); @@ -208,7 +208,7 @@ u32 tcp_default_init_rwnd(u32 mss) * be a multiple of mss if possible. We assume here that mss >= 1. * This MUST be enforced by all callers. 
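A worked example of the clamp in tcp_default_init_rwnd() shown above (illustrative figures): with the default of TCP_INIT_CWND * 2 = 20 segments, a 1460-byte MSS keeps 20, while an 8940-byte jumbo MSS is scaled down to max((1460 * 20) / 8940, 2U) = 3, so the advertised window stays around the same byte count:

static u32 example_init_rwnd(u32 sysctl_value, u32 mss)
{
	u32 init_rwnd = sysctl_value;	/* e.g. net->ipv4.sysctl_tcp_default_init_rwnd */

	if (mss > 1460)
		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
	return init_rwnd;		/* 20 for mss = 1460, 3 for mss = 8940 */
}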
*/ -void tcp_select_initial_window(int __space, __u32 mss, +void tcp_select_initial_window(struct net *net, int __space, __u32 mss, __u32 *rcv_wnd, __u32 *window_clamp, int wscale_ok, __u8 *rcv_wscale, __u32 init_rcv_wnd) @@ -251,7 +251,7 @@ void tcp_select_initial_window(int __space, __u32 mss, if (mss > (1 << *rcv_wscale)) { if (!init_rcv_wnd) /* Use default unless specified otherwise */ - init_rcv_wnd = tcp_default_init_rwnd(mss); + init_rcv_wnd = tcp_default_init_rwnd(net, mss); *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); } @@ -616,7 +616,8 @@ static unsigned int tcp_synack_options(struct request_sock *req, unsigned int mss, struct sk_buff *skb, struct tcp_out_options *opts, const struct tcp_md5sig_key *md5, - struct tcp_fastopen_cookie *foc) + struct tcp_fastopen_cookie *foc, + enum tcp_synack_type synack_type) { struct inet_request_sock *ireq = inet_rsk(req); unsigned int remaining = MAX_TCP_OPTION_SPACE; @@ -631,7 +632,8 @@ static unsigned int tcp_synack_options(struct request_sock *req, * rather than TS in order to fit in better with old, * buggy kernels, but that was deemed to be unnecessary. */ - ireq->tstamp_ok &= !ireq->sack_ok; + if (synack_type != TCP_SYNACK_COOKIE) + ireq->tstamp_ok &= !ireq->sack_ok; } #endif @@ -3252,8 +3254,8 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req)); #endif skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4); - tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, foc) + - sizeof(*th); + tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, + foc, synack_type) + sizeof(*th); skb_push(skb, tcp_header_size); skb_reset_transport_header(skb); @@ -3354,7 +3356,7 @@ static void tcp_connect_init(struct sock *sk) if (rcv_wnd == 0) rcv_wnd = dst_metric(dst, RTAX_INITRWND); - tcp_select_initial_window(tcp_full_space(sk), + tcp_select_initial_window(sock_net(sk), tcp_full_space(sk), tp->advmss - (tp->rx_opt.ts_recent_stamp ? 
tp->tcp_header_len - sizeof(struct tcphdr) : 0), &tp->rcv_wnd, &tp->window_clamp, diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index 9e3488d50b157eb8ee7f9be44370176c157a1c62..e6c5a4b5921df9df95600d910786f80ae22ac4c1 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -126,15 +126,16 @@ int inet6addr_validator_notifier_call_chain(unsigned long val, void *v) } EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain); -static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1, - struct dst_entry **u2, - struct flowi6 *u3) +static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net, + const struct sock *sk, + struct flowi6 *fl6, + const struct in6_addr *final_dst) { - return -EAFNOSUPPORT; + return ERR_PTR(-EAFNOSUPPORT); } const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) { - .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup, + .ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow, }; EXPORT_SYMBOL_GPL(ipv6_stub); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index fda5fae57b834d13e48799dba31aceb7995ca5a5..7edfcf5e6d731d2c38a927eeacf3842de3edea75 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -715,7 +715,7 @@ int inet6_sk_rebuild_header(struct sock *sk) &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { sk->sk_route_caps = 0; sk->sk_err_soft = -PTR_ERR(dst); @@ -873,7 +873,7 @@ static struct pernet_operations inet6_net_ops = { static const struct ipv6_stub ipv6_stub_impl = { .ipv6_sock_mc_join = ipv6_sock_mc_join, .ipv6_sock_mc_drop = ipv6_sock_mc_drop, - .ipv6_dst_lookup = ip6_dst_lookup, + .ipv6_dst_lookup_flow = ip6_dst_lookup_flow, .udpv6_encap_enable = udpv6_encap_enable, .ndisc_send_na = ndisc_send_na, .nd_tbl = &nd_tbl, @@ -1087,11 +1087,11 @@ static int __init inet6_init(void) igmp_fail: ndisc_cleanup(); ndisc_fail: - ip6_mr_cleanup(); + icmpv6_cleanup(); icmp_fail: - unregister_pernet_subsys(&inet6_net_ops); + ip6_mr_cleanup(); ipmr_fail: - icmpv6_cleanup(); + unregister_pernet_subsys(&inet6_net_ops); register_pernet_fail: sock_unregister(PF_INET6); rtnl_unregister_all(PF_INET6); diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index 1c0bb9fb76e61fa7d12317190ebac38847530858..70611784c071f9a7070efce6fd1e3e9a2e2a1cf9 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c @@ -1061,7 +1061,8 @@ static int calipso_opt_getattr(const unsigned char *calipso, goto getattr_return; } - secattr->flags |= NETLBL_SECATTR_MLS_CAT; + if (secattr->attr.mls.cat) + secattr->flags |= NETLBL_SECATTR_MLS_CAT; } secattr->type = NETLBL_NLTYPE_CALIPSO; diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index fa4f183f123a8b483867d2f684e1964795d878c5..c32a78560a07bcb04480a9bb04ddf75ecc0a8e2d 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -88,7 +88,7 @@ int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr) final_p = fl6_update_dst(&fl6, opt, &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto out; diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index f52c314d4c97086fba003709f506ec7e408baba8..a50d1943dd620189d964a602585e357b3abf33ac 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -121,9 +121,16 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb) struct ip_esp_hdr *esph; struct 
ipv6hdr *iph = ipv6_hdr(skb); struct xfrm_offload *xo = xfrm_offload(skb); - int proto = iph->nexthdr; + u8 proto = iph->nexthdr; skb_push(skb, -skb_network_offset(skb)); + + if (x->outer_mode->encap == XFRM_MODE_TRANSPORT) { + __be16 frag; + + ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag); + } + esph = ip_esp_hdr(skb); *skb_mac_header(skb) = IPPROTO_ESP; diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 890adadcda16aee20400d7dc394931fde8797c66..92fe9e565da0b685468a82f349ab4b1887042f1e 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -52,7 +52,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk, fl6->flowi6_uid = sk->sk_uid; security_req_classify_flow(req, flowi6_to_flowi(fl6)); - dst = ip6_dst_lookup_flow(sk, fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (IS_ERR(dst)) return NULL; @@ -107,7 +107,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk, dst = __inet6_csk_dst_check(sk, np->dst_cookie); if (!dst) { - dst = ip6_dst_lookup_flow(sk, fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (!IS_ERR(dst)) ip6_dst_store(sk, dst, NULL, NULL); diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 726ba41133a36403d52ea4c97e4c886316c8d4af..e07cc2cfc1a63626fb85c4611e8d1d9bc22b6707 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -124,6 +124,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, int dev_type = (gre_proto == htons(ETH_P_TEB)) ? ARPHRD_ETHER : ARPHRD_IP6GRE; int score, cand_score = 4; + struct net_device *ndev; for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) { if (!ipv6_addr_equal(local, &t->parms.laddr) || @@ -226,9 +227,9 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, if (cand) return cand; - dev = ign->fb_tunnel_dev; - if (dev->flags & IFF_UP) - return netdev_priv(dev); + ndev = READ_ONCE(ign->fb_tunnel_dev); + if (ndev && ndev->flags & IFF_UP) + return netdev_priv(ndev); return NULL; } @@ -364,6 +365,8 @@ static void ip6gre_tunnel_uninit(struct net_device *dev) struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); ip6gre_tunnel_unlink(ign, t); + if (ign->fb_tunnel_dev == dev) + WRITE_ONCE(ign->fb_tunnel_dev, NULL); dst_cache_reset(&t->dst_cache); dev_put(dev); } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 94ba38fd154d1baa3cf6d5c5a955dbce89a16d17..292f196886762b4a20b6673b2912ed2f8ef5aecc 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1089,19 +1089,19 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup); * It returns a valid dst pointer on success, or a pointer encoded * error code. 
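Since ip6_dst_lookup_flow() now takes the namespace explicitly and, as its comment notes, returns either a dst or a pointer-encoded error, a caller sketch (illustrative only) of the IS_ERR()/PTR_ERR() handling rather than a NULL check:

static int example_route_output(struct net *net, struct sock *sk,
				struct flowi6 *fl6, struct sk_buff *skb)
{
	struct dst_entry *dst;

	dst = ip6_dst_lookup_flow(net, sk, fl6, NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);	/* error is encoded in the pointer */

	skb_dst_set(skb, dst);
	return 0;
}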
*/ -struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, +struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst) { struct dst_entry *dst = NULL; int err; - err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6); + err = ip6_dst_lookup_tail(net, sk, &dst, fl6); if (err) return ERR_PTR(err); if (final_dst) fl6->daddr = *final_dst; - return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); + return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0); } EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); @@ -1126,7 +1126,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, dst = ip6_sk_dst_check(sk, dst, fl6); if (!dst) - dst = ip6_dst_lookup_flow(sk, fl6, final_dst); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst); return dst; } diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 337b43d4c3eb81f71e10d3a24f4314c48a3664f1..c183222967d099eef6a6fa11fe72cd148c252afb 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -185,14 +185,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, retv = -EBUSY; break; } - } - if (sk->sk_protocol == IPPROTO_TCP && - sk->sk_prot != &tcpv6_prot) { - retv = -EBUSY; + } else if (sk->sk_protocol == IPPROTO_TCP) { + if (sk->sk_prot != &tcpv6_prot) { + retv = -EBUSY; + break; + } + } else { break; } - if (sk->sk_protocol != IPPROTO_TCP) - break; + if (sk->sk_state != TCP_ESTABLISHED) { retv = -ENOTCONN; break; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 611dc5d55fa097f369b0e85d2f6b323a716d7402..959057515fc9ca846e50bf2bf15207e3950dd849 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -2599,6 +2599,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev) idev->mc_list = i->next; write_unlock_bh(&idev->lock); + ip6_mc_clear_src(i); ma_put(i); write_lock_bh(&idev->lock); } diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index ac428311965fbcda4498ba3c8ab230fe634d4906..3d9d2007420370142a6874dfc5f14d5cde73d874 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -929,7 +929,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto out; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 4eee004a278887f7fecde73ad5c183a93d3f48c8..98ab1564aade672434147e0b698b912294b350d5 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1479,8 +1479,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, const struct in6_addr *daddr, *saddr; struct rt6_info *rt6 = (struct rt6_info *)dst; - if (dst_metric_locked(dst, RTAX_MTU)) - return; + /* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU) + * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it. 
+ * [see also comment in rt6_mtu_change_route()] + */ if (iph) { daddr = &iph->daddr; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 4e7817abc0b934fbff21ba481c3f6773475c7a63..c3b8f755714321e27a41f63b4ad130f7f19fe260 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -238,13 +238,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) fl6.flowi6_uid = sk->sk_uid; security_req_classify_flow(req, flowi6_to_flowi(&fl6)); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) goto out_free; } req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); - tcp_select_initial_window(tcp_full_space(sk), req->mss, + tcp_select_initial_window(sock_net(sk), tcp_full_space(sk), req->mss, &req->rsk_rcv_wnd, &req->rsk_window_clamp, ireq->wscale_ok, &rcv_wscale, dst_metric(dst, RTAX_INITRWND)); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index e2cbb6286adf92177467ff7f1bca7a6662a1c3fa..20b4a14b9e1a5d36c9cfbb3d9db302e90536bd2f 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -252,7 +252,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto failure; @@ -865,7 +865,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 * Underlying function will use this to retrieve the network * namespace */ - dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); + dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); if (!IS_ERR(dst)) { skb_dst_set(buff, dst); ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index d169511ea7e6951262f48e2005a952eb80cfeadf..a8aee0d15f5dce9d5a7d89e782fd5a561a566b9a 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -328,8 +328,8 @@ struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, } EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname); -static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel, - struct l2tp_session *session) +int l2tp_session_register(struct l2tp_session *session, + struct l2tp_tunnel *tunnel) { struct l2tp_session *session_walk; struct hlist_head *g_head; @@ -382,6 +382,10 @@ static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel, hlist_add_head(&session->hlist, head); write_unlock_bh(&tunnel->hlist_lock); + /* Ignore management session in session count value */ + if (session->session_id != 0) + atomic_inc(&l2tp_session_count); + return 0; err_tlock_pnlock: @@ -391,6 +395,7 @@ static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel, return err; } +EXPORT_SYMBOL_GPL(l2tp_session_register); /* Lookup a tunnel by id */ @@ -1128,6 +1133,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, /* Queue the packet to IP for output */ skb->ignore_df = 1; + skb_dst_drop(skb); #if IS_ENABLED(CONFIG_IPV6) if (l2tp_sk_is_v6(tunnel->sock)) error = inet6_csk_xmit(tunnel->sock, skb, NULL); @@ -1201,10 +1207,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len goto out_unlock; } - /* Get routing info from the tunnel socket */ - skb_dst_drop(skb); - skb_dst_set(skb, sk_dst_check(sk, 0)); - inet = inet_sk(sk); fl = &inet->cork.fl; switch (tunnel->encap) { @@ -1576,6 +1578,8 @@ int 
l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 tunnel_id, fd); goto err; } + if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6) + goto err; switch (encap) { case L2TP_ENCAPTYPE_UDP: if (sk->sk_protocol != IPPROTO_UDP) { @@ -1783,7 +1787,6 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) { struct l2tp_session *session; - int err; session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); if (session != NULL) { @@ -1840,17 +1843,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn refcount_set(&session->ref_count, 1); - err = l2tp_session_add_to_tunnel(tunnel, session); - if (err) { - kfree(session); - - return ERR_PTR(err); - } - - /* Ignore management session in session count value */ - if (session->session_id != 0) - atomic_inc(&l2tp_session_count); - return session; } diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index c3650b353ceaa0bcfe6ab61a42687fb8ca84afc4..0cb1b57512ad310a2360d6a0d6c50ebc9c300000 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -256,6 +256,9 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); +int l2tp_session_register(struct l2tp_session *session, + struct l2tp_tunnel *tunnel); + void __l2tp_session_unhash(struct l2tp_session *session); int l2tp_session_delete(struct l2tp_session *session); void l2tp_session_free(struct l2tp_session *session); diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 014a7bc2a872514cf4422302a92b692ecda31c27..d29bfee291cbfe89cb535fd60b3435075b4ee935 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -54,7 +54,7 @@ struct l2tp_eth { /* via l2tp_session_priv() */ struct l2tp_eth_sess { - struct net_device *dev; + struct net_device __rcu *dev; }; @@ -72,7 +72,14 @@ static int l2tp_eth_dev_init(struct net_device *dev) static void l2tp_eth_dev_uninit(struct net_device *dev) { - dev_put(dev); + struct l2tp_eth *priv = netdev_priv(dev); + struct l2tp_eth_sess *spriv; + + spriv = l2tp_session_priv(priv->session); + RCU_INIT_POINTER(spriv->dev, NULL); + /* No need for synchronize_net() here. We're called by + * unregister_netdev*(), which does the synchronisation for us. 
+ */ } static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev) @@ -130,8 +137,8 @@ static void l2tp_eth_dev_setup(struct net_device *dev) static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) { struct l2tp_eth_sess *spriv = l2tp_session_priv(session); - struct net_device *dev = spriv->dev; - struct l2tp_eth *priv = netdev_priv(dev); + struct net_device *dev; + struct l2tp_eth *priv; if (session->debug & L2TP_MSG_DATA) { unsigned int length; @@ -155,16 +162,25 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, skb_dst_drop(skb); nf_reset(skb); + rcu_read_lock(); + dev = rcu_dereference(spriv->dev); + if (!dev) + goto error_rcu; + + priv = netdev_priv(dev); if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) { atomic_long_inc(&priv->rx_packets); atomic_long_add(data_len, &priv->rx_bytes); } else { atomic_long_inc(&priv->rx_errors); } + rcu_read_unlock(); + return; +error_rcu: + rcu_read_unlock(); error: - atomic_long_inc(&priv->rx_errors); kfree_skb(skb); } @@ -175,11 +191,15 @@ static void l2tp_eth_delete(struct l2tp_session *session) if (session) { spriv = l2tp_session_priv(session); - dev = spriv->dev; + + rtnl_lock(); + dev = rtnl_dereference(spriv->dev); if (dev) { - unregister_netdev(dev); - spriv->dev = NULL; + unregister_netdevice(dev); + rtnl_unlock(); module_put(THIS_MODULE); + } else { + rtnl_unlock(); } } } @@ -189,9 +209,20 @@ static void l2tp_eth_show(struct seq_file *m, void *arg) { struct l2tp_session *session = arg; struct l2tp_eth_sess *spriv = l2tp_session_priv(session); - struct net_device *dev = spriv->dev; + struct net_device *dev; + + rcu_read_lock(); + dev = rcu_dereference(spriv->dev); + if (!dev) { + rcu_read_unlock(); + return; + } + dev_hold(dev); + rcu_read_unlock(); seq_printf(m, " interface %s\n", dev->name); + + dev_put(dev); } #endif @@ -268,14 +299,14 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel, peer_session_id, cfg); if (IS_ERR(session)) { rc = PTR_ERR(session); - goto out; + goto err; } dev = alloc_netdev(sizeof(*priv), name, name_assign_type, l2tp_eth_dev_setup); if (!dev) { rc = -ENOMEM; - goto out_del_session; + goto err_sess; } dev_net_set(dev, net); @@ -295,26 +326,48 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel, #endif spriv = l2tp_session_priv(session); - spriv->dev = dev; - rc = register_netdev(dev); - if (rc < 0) - goto out_del_dev; + l2tp_session_inc_refcount(session); + + rtnl_lock(); + + /* Register both device and session while holding the rtnl lock. This + * ensures that l2tp_eth_delete() will see that there's a device to + * unregister, even if it happened to run before we assign spriv->dev. 
+ */ + rc = l2tp_session_register(session, tunnel); + if (rc < 0) { + rtnl_unlock(); + goto err_sess_dev; + } + + rc = register_netdevice(dev); + if (rc < 0) { + rtnl_unlock(); + l2tp_session_delete(session); + l2tp_session_dec_refcount(session); + free_netdev(dev); + + return rc; + } - __module_get(THIS_MODULE); - /* Must be done after register_netdev() */ strlcpy(session->ifname, dev->name, IFNAMSIZ); + rcu_assign_pointer(spriv->dev, dev); - dev_hold(dev); + rtnl_unlock(); + + l2tp_session_dec_refcount(session); + + __module_get(THIS_MODULE); return 0; -out_del_dev: +err_sess_dev: + l2tp_session_dec_refcount(session); free_netdev(dev); - spriv->dev = NULL; -out_del_session: - l2tp_session_delete(session); -out: +err_sess: + kfree(session); +err: return rc; } diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 8ab3340a31140d6c935ca769f90ac1549c38662b..c778f7f5c3503013269face37cc8d868387954f0 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -215,15 +214,31 @@ static int l2tp_ip_recv(struct sk_buff *skb) return 0; } -static int l2tp_ip_open(struct sock *sk) +static int l2tp_ip_hash(struct sock *sk) { - /* Prevent autobind. We don't have ports. */ - inet_sk(sk)->inet_num = IPPROTO_L2TP; + if (sk_unhashed(sk)) { + write_lock_bh(&l2tp_ip_lock); + sk_add_node(sk, &l2tp_ip_table); + write_unlock_bh(&l2tp_ip_lock); + } + return 0; +} +static void l2tp_ip_unhash(struct sock *sk) +{ + if (sk_unhashed(sk)) + return; write_lock_bh(&l2tp_ip_lock); - sk_add_node(sk, &l2tp_ip_table); + sk_del_node_init(sk); write_unlock_bh(&l2tp_ip_lock); +} + +static int l2tp_ip_open(struct sock *sk) +{ + /* Prevent autobind. We don't have ports. */ + inet_sk(sk)->inet_num = IPPROTO_L2TP; + l2tp_ip_hash(sk); return 0; } @@ -605,8 +620,8 @@ static struct proto l2tp_ip_prot = { .sendmsg = l2tp_ip_sendmsg, .recvmsg = l2tp_ip_recvmsg, .backlog_rcv = l2tp_ip_backlog_recv, - .hash = inet_hash, - .unhash = inet_unhash, + .hash = l2tp_ip_hash, + .unhash = l2tp_ip_unhash, .obj_size = sizeof(struct l2tp_ip_sock), #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ip_setsockopt, diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index a2540caf0686947133e6895a79e57cc73aa26c48..6f124af812aa49aefa4d2f578029ceab9989f6f9 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -24,8 +24,6 @@ #include #include #include -#include -#include #include #include #include @@ -228,15 +226,31 @@ static int l2tp_ip6_recv(struct sk_buff *skb) return 0; } -static int l2tp_ip6_open(struct sock *sk) +static int l2tp_ip6_hash(struct sock *sk) { - /* Prevent autobind. We don't have ports. */ - inet_sk(sk)->inet_num = IPPROTO_L2TP; + if (sk_unhashed(sk)) { + write_lock_bh(&l2tp_ip6_lock); + sk_add_node(sk, &l2tp_ip6_table); + write_unlock_bh(&l2tp_ip6_lock); + } + return 0; +} +static void l2tp_ip6_unhash(struct sock *sk) +{ + if (sk_unhashed(sk)) + return; write_lock_bh(&l2tp_ip6_lock); - sk_add_node(sk, &l2tp_ip6_table); + sk_del_node_init(sk); write_unlock_bh(&l2tp_ip6_lock); +} + +static int l2tp_ip6_open(struct sock *sk) +{ + /* Prevent autobind. We don't have ports. 
*/ + inet_sk(sk)->inet_num = IPPROTO_L2TP; + l2tp_ip6_hash(sk); return 0; } @@ -628,7 +642,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto out; @@ -741,8 +755,8 @@ static struct proto l2tp_ip6_prot = { .sendmsg = l2tp_ip6_sendmsg, .recvmsg = l2tp_ip6_recvmsg, .backlog_rcv = l2tp_ip6_backlog_recv, - .hash = inet6_hash, - .unhash = inet_unhash, + .hash = l2tp_ip6_hash, + .unhash = l2tp_ip6_unhash, .obj_size = sizeof(struct l2tp_ip6_sock), #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 82853867751808a5782dc0139dd2aff4bfe95c03..1bde22f2004aa9745cfe6228aad4d6b77ea57d22 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -122,8 +122,11 @@ struct pppol2tp_session { int owner; /* pid that opened the socket */ - struct sock *sock; /* Pointer to the session + struct mutex sk_lock; /* Protects .sk */ + struct sock __rcu *sk; /* Pointer to the session * PPPoX socket */ + struct sock *__sk; /* Copy of .sk, for cleanup */ + struct rcu_head rcu; /* For asynchronous release */ struct sock *tunnel_sock; /* Pointer to the tunnel UDP * socket */ int flags; /* accessed by PPPIOCGFLAGS. @@ -138,6 +141,24 @@ static const struct ppp_channel_ops pppol2tp_chan_ops = { static const struct proto_ops pppol2tp_ops; +/* Retrieves the pppol2tp socket associated to a session. + * A reference is held on the returned socket, so this function must be paired + * with sock_put(). + */ +static struct sock *pppol2tp_session_get_sock(struct l2tp_session *session) +{ + struct pppol2tp_session *ps = l2tp_session_priv(session); + struct sock *sk; + + rcu_read_lock(); + sk = rcu_dereference(ps->sk); + if (sk) + sock_hold(sk); + rcu_read_unlock(); + + return sk; +} + /* Helpers to obtain tunnel/session contexts from sockets. */ static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk) @@ -205,7 +226,8 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int /* If the socket is bound, send it in to PPP's input queue. Otherwise * queue it on the session socket. 
*/ - sk = ps->sock; + rcu_read_lock(); + sk = rcu_dereference(ps->sk); if (sk == NULL) goto no_sock; @@ -245,30 +267,16 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int kfree_skb(skb); } } + rcu_read_unlock(); return; no_sock: + rcu_read_unlock(); l2tp_info(session, L2TP_MSG_DATA, "%s: no socket\n", session->name); kfree_skb(skb); } -static void pppol2tp_session_sock_hold(struct l2tp_session *session) -{ - struct pppol2tp_session *ps = l2tp_session_priv(session); - - if (ps->sock) - sock_hold(ps->sock); -} - -static void pppol2tp_session_sock_put(struct l2tp_session *session) -{ - struct pppol2tp_session *ps = l2tp_session_priv(session); - - if (ps->sock) - sock_put(ps->sock); -} - /************************************************************************ * Transmit handling ***********************************************************************/ @@ -429,17 +437,16 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) */ static void pppol2tp_session_close(struct l2tp_session *session) { - struct pppol2tp_session *ps = l2tp_session_priv(session); - struct sock *sk = ps->sock; - struct socket *sock = sk->sk_socket; + struct sock *sk; BUG_ON(session->magic != L2TP_SESSION_MAGIC); - if (sock) - inet_shutdown(sock, SEND_SHUTDOWN); - - /* Don't let the session go away before our socket does */ - l2tp_session_inc_refcount(session); + sk = pppol2tp_session_get_sock(session); + if (sk) { + if (sk->sk_socket) + inet_shutdown(sk->sk_socket, SEND_SHUTDOWN); + sock_put(sk); + } } /* Really kill the session socket. (Called from sock_put() if @@ -459,6 +466,14 @@ static void pppol2tp_session_destruct(struct sock *sk) } } +static void pppol2tp_put_sk(struct rcu_head *head) +{ + struct pppol2tp_session *ps; + + ps = container_of(head, typeof(*ps), rcu); + sock_put(ps->__sk); +} + /* Called when the PPPoX socket (session) is closed. */ static int pppol2tp_release(struct socket *sock) @@ -484,11 +499,23 @@ static int pppol2tp_release(struct socket *sock) session = pppol2tp_sock_to_session(sk); - /* Purge any queued data */ if (session != NULL) { - __l2tp_session_unhash(session); - l2tp_session_queue_purge(session); - sock_put(sk); + struct pppol2tp_session *ps; + + l2tp_session_delete(session); + + ps = l2tp_session_priv(session); + mutex_lock(&ps->sk_lock); + ps->__sk = rcu_dereference_protected(ps->sk, + lockdep_is_held(&ps->sk_lock)); + RCU_INIT_POINTER(ps->sk, NULL); + mutex_unlock(&ps->sk_lock); + call_rcu(&ps->rcu, pppol2tp_put_sk); + + /* Rely on the sock_put() call at the end of the function for + * dropping the reference held by pppol2tp_sock_to_session(). + * The last reference will be dropped by pppol2tp_put_sk(). 
+ */ } release_sock(sk); @@ -555,16 +582,47 @@ static int pppol2tp_create(struct net *net, struct socket *sock, int kern) static void pppol2tp_show(struct seq_file *m, void *arg) { struct l2tp_session *session = arg; - struct pppol2tp_session *ps = l2tp_session_priv(session); + struct sock *sk; + + sk = pppol2tp_session_get_sock(session); + if (sk) { + struct pppox_sock *po = pppox_sk(sk); - if (ps) { - struct pppox_sock *po = pppox_sk(ps->sock); - if (po) - seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); + seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); + sock_put(sk); } } #endif +static void pppol2tp_session_init(struct l2tp_session *session) +{ + struct pppol2tp_session *ps; + struct dst_entry *dst; + + session->recv_skb = pppol2tp_recv; + session->session_close = pppol2tp_session_close; +#if IS_ENABLED(CONFIG_L2TP_DEBUGFS) + session->show = pppol2tp_show; +#endif + + ps = l2tp_session_priv(session); + mutex_init(&ps->sk_lock); + ps->tunnel_sock = session->tunnel->sock; + ps->owner = current->pid; + + /* If PMTU discovery was enabled, use the MTU that was discovered */ + dst = sk_dst_get(session->tunnel->sock); + if (dst) { + u32 pmtu = dst_mtu(dst); + + if (pmtu) { + session->mtu = pmtu - PPPOL2TP_HEADER_OVERHEAD; + session->mru = pmtu - PPPOL2TP_HEADER_OVERHEAD; + } + dst_release(dst); + } +} + /* connect() handler. Attach a PPPoX socket to a tunnel UDP socket */ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, @@ -576,7 +634,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, struct l2tp_session *session = NULL; struct l2tp_tunnel *tunnel; struct pppol2tp_session *ps; - struct dst_entry *dst; struct l2tp_session_cfg cfg = { 0, }; int error = 0; u32 tunnel_id, peer_tunnel_id; @@ -698,13 +755,17 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, /* Using a pre-existing session is fine as long as it hasn't * been connected yet. */ - if (ps->sock) { + mutex_lock(&ps->sk_lock); + if (rcu_dereference_protected(ps->sk, + lockdep_is_held(&ps->sk_lock))) { + mutex_unlock(&ps->sk_lock); error = -EEXIST; goto end; } /* consistency checks */ if (ps->tunnel_sock != tunnel->sock) { + mutex_unlock(&ps->sk_lock); error = -EEXIST; goto end; } @@ -720,35 +781,19 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, error = PTR_ERR(session); goto end; } - } - - /* Associate session with its PPPoL2TP socket */ - ps = l2tp_session_priv(session); - ps->owner = current->pid; - ps->sock = sk; - ps->tunnel_sock = tunnel->sock; - - session->recv_skb = pppol2tp_recv; - session->session_close = pppol2tp_session_close; -#if IS_ENABLED(CONFIG_L2TP_DEBUGFS) - session->show = pppol2tp_show; -#endif - - /* We need to know each time a skb is dropped from the reorder - * queue. 
- */ - session->ref = pppol2tp_session_sock_hold; - session->deref = pppol2tp_session_sock_put; - /* If PMTU discovery was enabled, use the MTU that was discovered */ - dst = sk_dst_get(tunnel->sock); - if (dst != NULL) { - u32 pmtu = dst_mtu(dst); + pppol2tp_session_init(session); + ps = l2tp_session_priv(session); + l2tp_session_inc_refcount(session); - if (pmtu != 0) - session->mtu = session->mru = pmtu - - PPPOL2TP_HEADER_OVERHEAD; - dst_release(dst); + mutex_lock(&ps->sk_lock); + error = l2tp_session_register(session, tunnel); + if (error < 0) { + mutex_unlock(&ps->sk_lock); + kfree(session); + goto end; + } + drop_refcnt = true; } /* Special case: if source & dest session_id == 0x0000, this @@ -773,12 +818,23 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.mtu = session->mtu; error = ppp_register_net_channel(sock_net(sk), &po->chan); - if (error) + if (error) { + mutex_unlock(&ps->sk_lock); goto end; + } out_no_ppp: /* This is how we get the session context from the socket. */ sk->sk_user_data = session; + rcu_assign_pointer(ps->sk, sk); + mutex_unlock(&ps->sk_lock); + + /* Keep the reference we've grabbed on the session: sk doesn't expect + * the session to disappear. pppol2tp_session_destruct() is responsible + * for dropping it. + */ + drop_refcnt = false; + sk->sk_state = PPPOX_CONNECTED; l2tp_info(session, L2TP_MSG_CONTROL, "%s: created\n", session->name); @@ -802,12 +858,11 @@ static int pppol2tp_session_create(struct net *net, struct l2tp_tunnel *tunnel, { int error; struct l2tp_session *session; - struct pppol2tp_session *ps; /* Error if tunnel socket is not prepped */ if (!tunnel->sock) { error = -ENOENT; - goto out; + goto err; } /* Default MTU values. */ @@ -822,18 +877,20 @@ static int pppol2tp_session_create(struct net *net, struct l2tp_tunnel *tunnel, peer_session_id, cfg); if (IS_ERR(session)) { error = PTR_ERR(session); - goto out; + goto err; } - ps = l2tp_session_priv(session); - ps->tunnel_sock = tunnel->sock; + pppol2tp_session_init(session); - l2tp_info(session, L2TP_MSG_CONTROL, "%s: created\n", - session->name); + error = l2tp_session_register(session, tunnel); + if (error < 0) + goto err_sess; - error = 0; + return 0; -out: +err_sess: + kfree(session); +err: return error; } @@ -994,12 +1051,10 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n", session->name, cmd, arg); - sk = ps->sock; + sk = pppol2tp_session_get_sock(session); if (!sk) return -EBADR; - sock_hold(sk); - switch (cmd) { case SIOCGIFMTU: err = -ENXIO; @@ -1275,7 +1330,6 @@ static int pppol2tp_session_setsockopt(struct sock *sk, int optname, int val) { int err = 0; - struct pppol2tp_session *ps = l2tp_session_priv(session); switch (optname) { case PPPOL2TP_SO_RECVSEQ: @@ -1296,8 +1350,8 @@ static int pppol2tp_session_setsockopt(struct sock *sk, } session->send_seq = !!val; { - struct sock *ssk = ps->sock; - struct pppox_sock *po = pppox_sk(ssk); + struct pppox_sock *po = pppox_sk(sk); + po->chan.hdrlen = val ? 
PPPOL2TP_L2TP_HDR_SIZE_SEQ : PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; } @@ -1636,8 +1690,9 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) { struct l2tp_session *session = v; struct l2tp_tunnel *tunnel = session->tunnel; - struct pppol2tp_session *ps = l2tp_session_priv(session); - struct pppox_sock *po = pppox_sk(ps->sock); + unsigned char state; + char user_data_ok; + struct sock *sk; u32 ip = 0; u16 port = 0; @@ -1647,6 +1702,15 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) port = ntohs(inet->inet_sport); } + sk = pppol2tp_session_get_sock(session); + if (sk) { + state = sk->sk_state; + user_data_ok = (session == sk->sk_user_data) ? 'Y' : 'N'; + } else { + state = 0; + user_data_ok = 'N'; + } + seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> " "%04X/%04X %d %c\n", session->name, ip, port, @@ -1654,9 +1718,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) session->session_id, tunnel->peer_tunnel_id, session->peer_session_id, - ps->sock->sk_state, - (session == ps->sock->sk_user_data) ? - 'Y' : 'N'); + state, user_data_ok); seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n", session->mtu, session->mru, session->recv_seq ? 'R' : '-', @@ -1673,8 +1735,12 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) atomic_long_read(&session->stats.rx_bytes), atomic_long_read(&session->stats.rx_errors)); - if (po) + if (sk) { + struct pppox_sock *po = pppox_sk(sk); + seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); + sock_put(sk); + } } static int pppol2tp_seq_show(struct seq_file *m, void *v) diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index d552e88197133345a35943ce6aa74e8bc993a0a6..d301ac51bbe1d32ec212fbf1570ffe1327b985d5 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -273,6 +273,10 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) if (!sock_flag(sk, SOCK_ZAPPED)) goto out; + if (!addr->sllc_arphrd) + addr->sllc_arphrd = ARPHRD_ETHER; + if (addr->sllc_arphrd != ARPHRD_ETHER) + goto out; rc = -ENODEV; if (sk->sk_bound_dev_if) { llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); @@ -330,15 +334,15 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) goto out; rc = -EAFNOSUPPORT; - if (unlikely(addr->sllc_family != AF_LLC)) + if (!addr->sllc_arphrd) + addr->sllc_arphrd = ARPHRD_ETHER; + if (unlikely(addr->sllc_family != AF_LLC || addr->sllc_arphrd != ARPHRD_ETHER)) goto out; rc = -ENODEV; rcu_read_lock(); if (sk->sk_bound_dev_if) { llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if); if (llc->dev) { - if (!addr->sllc_arphrd) - addr->sllc_arphrd = llc->dev->type; if (is_zero_ether_addr(addr->sllc_mac)) memcpy(addr->sllc_mac, llc->dev->dev_addr, IFHWADDRLEN); diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 986e9b6b961d206caa6dc68dc1d5cbb0c3dcd93e..fe65701fe95cc354ad10dc0d3dee5cf2ee44f5c1 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -1088,7 +1088,14 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn, target_flags, mpath->dst, mpath->sn, da, 0, ttl, lifetime, 0, ifmsh->preq_id++, sdata); + + spin_lock_bh(&mpath->state_lock); + if (mpath->flags & MESH_PATH_DELETED) { + spin_unlock_bh(&mpath->state_lock); + goto enddiscovery; + } mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); + spin_unlock_bh(&mpath->state_lock); 
enddiscovery: rcu_read_unlock(); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index f44dd79081222a2f807de0213c6e328cd864daae..2c08d8a83b4e34a558600727059cef2afaea241f 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2120,6 +2120,7 @@ static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) { + struct ieee80211_hdr *hdr = (void *)rx->skb->data; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); @@ -2130,6 +2131,31 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) if (status->flag & RX_FLAG_DECRYPTED) return 0; + /* check mesh EAPOL frames first */ + if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) && + ieee80211_is_data(fc))) { + struct ieee80211s_hdr *mesh_hdr; + u16 hdr_len = ieee80211_hdrlen(fc); + u16 ethertype_offset; + __be16 ethertype; + + if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr)) + goto drop_check; + + /* make sure fixed part of mesh header is there, also checks skb len */ + if (!pskb_may_pull(rx->skb, hdr_len + 6)) + goto drop_check; + + mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len); + ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) + + sizeof(rfc1042_header); + + if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 && + ethertype == rx->sdata->control_port_protocol) + return 0; + } + +drop_check: /* Drop unencrypted frames if key is set. */ if (unlikely(!ieee80211_has_protected(fc) && !ieee80211_is_any_nullfunc(fc) && diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 9a153f64b8d7bbb1b91d914536e0b97d493b5b4c..00ede709db2e6e236caf4ff2e149da969f1afabc 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -587,16 +587,15 @@ static struct net_device *inet6_fib_lookup_dev(struct net *net, struct net_device *dev; struct dst_entry *dst; struct flowi6 fl6; - int err; if (!ipv6_stub) return ERR_PTR(-EAFNOSUPPORT); memset(&fl6, 0, sizeof(fl6)); memcpy(&fl6.daddr, addr, sizeof(struct in6_addr)); - err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6); - if (err) - return ERR_PTR(err); + dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL); + if (IS_ERR(dst)) + return ERR_CAST(dst); dev = dst->dev; dev_hold(dev); diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index c2b21c9c12295f90c425be644c0275fcc90ca484..5c59bbad8d1938fcc16549b5e00456404111504c 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -381,6 +381,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len, for (id = 0; id < IPSET_EXT_ID_MAX; id++) { if (!add_extension(id, cadt_flags, tb)) continue; + if (align < ip_set_extensions[id].align) + align = ip_set_extensions[id].align; len = ALIGN(len, ip_set_extensions[id].align); set->offset[id] = len; set->extensions |= ip_set_extensions[id].type; diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index e563921e6af51d2d131c0f9c5ec8628aa7438f42..6ebd63a5d8baa24f19fcffd6f5a9ae98d95640ed 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c @@ -61,7 +61,7 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb, /* Don't lookup sub-counters at all */ opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS; if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE) - opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE; + opt->cmdflags |= 
IPSET_FLAG_SKIP_COUNTER_UPDATE; list_for_each_entry_rcu(e, &map->members, list) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(e, set))) diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index b373e053ff9a3cdf2a9f49b15d257e5a5b3ab16e..90261055062ed3c515df20f3744e2b20281618ce 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1726,6 +1726,8 @@ static int sync_thread_backup(void *data) { struct ip_vs_sync_thread_data *tinfo = data; struct netns_ipvs *ipvs = tinfo->ipvs; + struct sock *sk = tinfo->sock->sk; + struct udp_sock *up = udp_sk(sk); int len; pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " @@ -1733,12 +1735,14 @@ static int sync_thread_backup(void *data) ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id); while (!kthread_should_stop()) { - wait_event_interruptible(*sk_sleep(tinfo->sock->sk), - !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) - || kthread_should_stop()); + wait_event_interruptible(*sk_sleep(sk), + !skb_queue_empty_lockless(&sk->sk_receive_queue) || + !skb_queue_empty_lockless(&up->reader_queue) || + kthread_should_stop()); /* do we have data now? */ - while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) { + while (!skb_queue_empty_lockless(&sk->sk_receive_queue) || + !skb_queue_empty_lockless(&up->reader_queue)) { len = ip_vs_receive(tinfo->sock, tinfo->buf, ipvs->bcfg.sync_maxlen); if (len <= 0) { diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index f951c83559aded7fc54bd4368b5ca838cf081388..8098e5f4426ac1d4fb24ddf5a23e782e27cd388a 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1244,9 +1244,9 @@ __nf_conntrack_alloc(struct net *net, *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash; ct->status = 0; write_pnet(&ct->ct_net, net); - memset(&ct->__nfct_init_offset[0], 0, + memset(&ct->__nfct_init_offset, 0, offsetof(struct nf_conn, proto) - - offsetof(struct nf_conn, __nfct_init_offset[0])); + offsetof(struct nf_conn, __nfct_init_offset)); nf_ct_zone_add(ct, zone); diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index 11562f2a08bb0d21f97831dcf7ab1ca539ce334e..203107ce24558984543e87f1369e5c2058bba0cc 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c @@ -71,24 +71,32 @@ EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn); #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) /* PptpControlMessageType names */ -const char *const pptp_msg_name[] = { - "UNKNOWN_MESSAGE", - "START_SESSION_REQUEST", - "START_SESSION_REPLY", - "STOP_SESSION_REQUEST", - "STOP_SESSION_REPLY", - "ECHO_REQUEST", - "ECHO_REPLY", - "OUT_CALL_REQUEST", - "OUT_CALL_REPLY", - "IN_CALL_REQUEST", - "IN_CALL_REPLY", - "IN_CALL_CONNECT", - "CALL_CLEAR_REQUEST", - "CALL_DISCONNECT_NOTIFY", - "WAN_ERROR_NOTIFY", - "SET_LINK_INFO" +static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = { + [0] = "UNKNOWN_MESSAGE", + [PPTP_START_SESSION_REQUEST] = "START_SESSION_REQUEST", + [PPTP_START_SESSION_REPLY] = "START_SESSION_REPLY", + [PPTP_STOP_SESSION_REQUEST] = "STOP_SESSION_REQUEST", + [PPTP_STOP_SESSION_REPLY] = "STOP_SESSION_REPLY", + [PPTP_ECHO_REQUEST] = "ECHO_REQUEST", + [PPTP_ECHO_REPLY] = "ECHO_REPLY", + [PPTP_OUT_CALL_REQUEST] = "OUT_CALL_REQUEST", + [PPTP_OUT_CALL_REPLY] = "OUT_CALL_REPLY", + [PPTP_IN_CALL_REQUEST] = "IN_CALL_REQUEST", + [PPTP_IN_CALL_REPLY] = "IN_CALL_REPLY", + [PPTP_IN_CALL_CONNECT] = "IN_CALL_CONNECT", + 
[PPTP_CALL_CLEAR_REQUEST] = "CALL_CLEAR_REQUEST", + [PPTP_CALL_DISCONNECT_NOTIFY] = "CALL_DISCONNECT_NOTIFY", + [PPTP_WAN_ERROR_NOTIFY] = "WAN_ERROR_NOTIFY", + [PPTP_SET_LINK_INFO] = "SET_LINK_INFO" }; + +const char *pptp_msg_name(u_int16_t msg) +{ + if (msg > PPTP_MSG_MAX) + return pptp_msg_name_array[0]; + + return pptp_msg_name_array[msg]; +} EXPORT_SYMBOL(pptp_msg_name); #endif @@ -275,7 +283,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound; msg = ntohs(ctlh->messageType); - pr_debug("inbound control message %s\n", pptp_msg_name[msg]); + pr_debug("inbound control message %s\n", pptp_msg_name(msg)); switch (msg) { case PPTP_START_SESSION_REPLY: @@ -310,7 +318,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, pcid = pptpReq->ocack.peersCallID; if (info->pns_call_id != pcid) goto invalid; - pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg], + pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg), ntohs(cid), ntohs(pcid)); if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) { @@ -327,7 +335,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, goto invalid; cid = pptpReq->icreq.callID; - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); info->cstate = PPTP_CALL_IN_REQ; info->pac_call_id = cid; break; @@ -346,7 +354,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, if (info->pns_call_id != pcid) goto invalid; - pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid)); + pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), ntohs(pcid)); info->cstate = PPTP_CALL_IN_CONF; /* we expect a GRE connection from PAC to PNS */ @@ -356,7 +364,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, case PPTP_CALL_DISCONNECT_NOTIFY: /* server confirms disconnect */ cid = pptpReq->disc.callID; - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); info->cstate = PPTP_CALL_NONE; /* untrack this call id, unexpect GRE packets */ @@ -383,7 +391,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff, invalid: pr_debug("invalid %s: type=%d cid=%u pcid=%u " "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", - msg <= PPTP_MSG_MAX ? 
pptp_msg_name[msg] : pptp_msg_name[0], + pptp_msg_name(msg), msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, ntohs(info->pns_call_id), ntohs(info->pac_call_id)); return NF_ACCEPT; @@ -403,7 +411,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; msg = ntohs(ctlh->messageType); - pr_debug("outbound control message %s\n", pptp_msg_name[msg]); + pr_debug("outbound control message %s\n", pptp_msg_name(msg)); switch (msg) { case PPTP_START_SESSION_REQUEST: @@ -425,7 +433,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, info->cstate = PPTP_CALL_OUT_REQ; /* track PNS call id */ cid = pptpReq->ocreq.callID; - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); info->pns_call_id = cid; break; @@ -439,7 +447,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, pcid = pptpReq->icack.peersCallID; if (info->pac_call_id != pcid) goto invalid; - pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg], + pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg), ntohs(cid), ntohs(pcid)); if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) { @@ -479,7 +487,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff, invalid: pr_debug("invalid %s: type=%d cid=%u pcid=%u " "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], + pptp_msg_name(msg), msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, ntohs(info->pns_call_id), ntohs(info->pac_call_id)); return NF_ACCEPT; diff --git a/net/netfilter/nf_nat_proto_udp.c b/net/netfilter/nf_nat_proto_udp.c index edd4a77dc09a837e71e4322d328033ee0af90ee9..167ad0dd269c92784147d87aef1f569d543f9307 100644 --- a/net/netfilter/nf_nat_proto_udp.c +++ b/net/netfilter/nf_nat_proto_udp.c @@ -66,15 +66,14 @@ static bool udp_manip_pkt(struct sk_buff *skb, enum nf_nat_manip_type maniptype) { struct udphdr *hdr; - bool do_csum; if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) return false; hdr = (struct udphdr *)(skb->data + hdroff); - do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL; + __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, + !!hdr->check); - __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, do_csum); return true; } diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index dd1030f5dd5e290e88313895f882788272317e22..dfe4e6787219cb3150e4800c39930b01ae5891a4 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c @@ -106,7 +106,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct) if (help->helper->data_len == 0) return -EINVAL; - nla_memcpy(help->data, nla_data(attr), sizeof(help->data)); + nla_memcpy(help->data, attr, sizeof(help->data)); return 0; } @@ -240,6 +240,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[], ret = -ENOMEM; goto err2; } + helper->data_len = size; helper->flags |= NF_CT_HELPER_F_USERSPACE; memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple)); diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index ed548d06b6dda9a98888bb83f2baa6b45c965c15..a18cceecef88e3789bc1cd3613cdb1f2eac17572 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -135,7 +135,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, priv->type = NF_NAT_MANIP_DST; break; default: - return -EINVAL; + return -EOPNOTSUPP; } if (tb[NFTA_NAT_FAMILY] == NULL) @@ 
-202,7 +202,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, if (tb[NFTA_NAT_FLAGS]) { priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); if (priv->flags & ~NF_NAT_RANGE_MASK) - return -EINVAL; + return -EOPNOTSUPP; } return nf_ct_netns_get(ctx->net, family); diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index ee3e5b6471a69ec0100ea9099dc2e8be24a4b024..15fe2120b31096983071f36ff2e066a8fb7d4443 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c @@ -748,6 +748,12 @@ int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap, if ((off & (BITS_PER_LONG - 1)) != 0) return -EINVAL; + /* a null catmap is equivalent to an empty one */ + if (!catmap) { + *offset = (u32)-1; + return 0; + } + if (off < catmap->startbit) { off = catmap->startbit; *offset = off; diff --git a/net/qrtr/ethernet.c b/net/qrtr/ethernet.c index 27bf1d61dcd35034e53307bdec1e67b16f9ace96..d302891fa9e157df9fc81762b6c36fb49dd054c9 100644 --- a/net/qrtr/ethernet.c +++ b/net/qrtr/ethernet.c @@ -178,10 +178,10 @@ void qcom_ethernet_qrtr_dl_cb(struct eth_adapt_result *eth_res) while (len > 0) { if (dlbuf->needed > 0) { pkt_len = dlbuf->pkt_len; - dlbuf->buf = dlbuf->buf + dlbuf->saved; if (len >= dlbuf->needed) { dlbuf->needed = set_cp_size(dlbuf->needed); - memcpy(dlbuf->buf, src, dlbuf->needed); + memcpy((dlbuf->buf + dlbuf->saved), + src, dlbuf->needed); rc = qrtr_endpoint_post(&qdev->ep, dlbuf->buf, pkt_len); if (rc == -EINVAL) { @@ -189,13 +189,15 @@ void qcom_ethernet_qrtr_dl_cb(struct eth_adapt_result *eth_res) "Invalid qrtr packet\n"); goto exit; } + memset(dlbuf->buf, 0, MAX_BUFSIZE); len = len - dlbuf->needed; src = src + dlbuf->needed; dlbuf->needed = 0; + dlbuf->pkt_len = 0; } else { /* Partial packet */ len = set_cp_size(len); - memcpy(dlbuf->buf, src, len); + memcpy(dlbuf->buf + dlbuf->saved, src, len); dlbuf->saved = dlbuf->saved + len; dlbuf->needed = dlbuf->needed - len; break; @@ -214,6 +216,12 @@ void qcom_ethernet_qrtr_dl_cb(struct eth_adapt_result *eth_res) break; } + if (pkt_len > MAX_BUFSIZE) { + dev_err(qdev->dev, + "Unsupported pkt_len %zu\n", pkt_len); + break; + } + if (pkt_len > len) { /* Partial packet */ dlbuf->needed = pkt_len - len; @@ -230,11 +238,11 @@ void qcom_ethernet_qrtr_dl_cb(struct eth_adapt_result *eth_res) dev_err(qdev->dev, "Invalid qrtr packet\n"); goto exit; } - pkt_len = set_cp_size(pkt_len); - memset(dlbuf->buf, 0, pkt_len); + memset(dlbuf->buf, 0, MAX_BUFSIZE); len = len - pkt_len; src = src + pkt_len; dlbuf->needed = 0; + dlbuf->pkt_len = 0; } } exit: diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index d838f5aca42a004fa3fa3eb1876abc60c96dddf4..54f2d331d21262417d1486159cc0543ffb9f0a94 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -135,6 +135,15 @@ static DECLARE_RWSEM(qrtr_node_lock); static DEFINE_IDR(qrtr_ports); static DEFINE_MUTEX(qrtr_port_lock); +/* backup buffers */ +#define QRTR_BACKUP_HI_NUM 5 +#define QRTR_BACKUP_HI_SIZE SZ_16K +#define QRTR_BACKUP_LO_NUM 20 +#define QRTR_BACKUP_LO_SIZE SZ_1K +static struct sk_buff_head qrtr_backup_lo; +static struct sk_buff_head qrtr_backup_hi; +static struct work_struct qrtr_backup_work; + /** * struct qrtr_node - endpoint node * @ep_lock: lock for endpoint management and callbacks @@ -694,6 +703,54 @@ int qrtr_peek_pkt_size(const void *data) } EXPORT_SYMBOL(qrtr_peek_pkt_size); +static void qrtr_alloc_backup(struct work_struct *work) +{ + struct sk_buff *skb; + + while (skb_queue_len(&qrtr_backup_lo) < QRTR_BACKUP_LO_NUM) { + skb = 
alloc_skb(QRTR_BACKUP_LO_SIZE, GFP_KERNEL); + if (!skb) + break; + skb_queue_tail(&qrtr_backup_lo, skb); + } + while (skb_queue_len(&qrtr_backup_hi) < QRTR_BACKUP_HI_NUM) { + skb = alloc_skb(QRTR_BACKUP_HI_SIZE, GFP_KERNEL); + if (!skb) + break; + skb_queue_tail(&qrtr_backup_hi, skb); + } +} + +static struct sk_buff *qrtr_get_backup(size_t len) +{ + struct sk_buff *skb = NULL; + + if (len < QRTR_BACKUP_LO_SIZE) + skb = skb_dequeue(&qrtr_backup_lo); + else if (len < QRTR_BACKUP_HI_SIZE) + skb = skb_dequeue(&qrtr_backup_hi); + + if (skb) + queue_work(system_unbound_wq, &qrtr_backup_work); + + return skb; +} + +static void qrtr_backup_init(void) +{ + skb_queue_head_init(&qrtr_backup_lo); + skb_queue_head_init(&qrtr_backup_hi); + INIT_WORK(&qrtr_backup_work, qrtr_alloc_backup); + queue_work(system_unbound_wq, &qrtr_backup_work); +} + +static void qrtr_backup_deinit(void) +{ + cancel_work_sync(&qrtr_backup_work); + skb_queue_purge(&qrtr_backup_lo); + skb_queue_purge(&qrtr_backup_hi); +} + /** * qrtr_endpoint_post() - post incoming data * @ep: endpoint handle @@ -718,8 +775,13 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) return -EINVAL; skb = alloc_skb_with_frags(sizeof(*v1), len, 0, &errcode, GFP_ATOMIC); - if (!skb) - return -ENOMEM; + if (!skb) { + skb = qrtr_get_backup(len); + if (!skb) { + pr_err("qrtr: Unable to get skb with len:%lu\n", len); + return -ENOMEM; + } + } skb_reserve(skb, sizeof(*v1)); cb = (struct qrtr_cb *)skb->cb; @@ -1485,7 +1547,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb, } up_read(&qrtr_node_lock); - qrtr_local_enqueue(node, skb, type, from, to, flags); + qrtr_local_enqueue(NULL, skb, type, from, to, flags); return 0; } @@ -1951,7 +2013,10 @@ static int __init qrtr_proto_init(void) rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, 0); + qrtr_backup_init(); + return 0; + } postcore_initcall(qrtr_proto_init); @@ -1960,6 +2025,8 @@ static void __exit qrtr_proto_fini(void) rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR); sock_unregister(qrtr_family.family); proto_unregister(&qrtr_proto); + + qrtr_backup_deinit(); } module_exit(qrtr_proto_fini); diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 2dd13f5c47c86be7ff1d2c9f411bc2436894a303..61425179780c3ede41ccf2a389e827ca0afe88c0 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -26,6 +26,11 @@ #include #include "ar-internal.h" +static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call, + unsigned long user_call_ID) +{ +} + /* * Preallocate a single service call, connection and peer and, if possible, * give them a user ID and attach the user's side of the ID to them. 
@@ -227,6 +232,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx) if (rx->discard_new_call) { _debug("discard %lx", call->user_call_ID); rx->discard_new_call(call, call->user_call_ID); + if (call->notify_rx) + call->notify_rx = rxrpc_dummy_notify; rxrpc_put_call(call, rxrpc_call_put_kernel); } rxrpc_call_completed(call); diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 18ce6f97462b6c23372df33a75614e5dfcf9d0ad..98285b117a7c0a8e48490ceb5031216abf48c877 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -664,13 +664,12 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU), rwind, ntohl(ackinfo->jumbo_max)); + if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) + rwind = RXRPC_RXTX_BUFF_SIZE - 1; if (call->tx_winsize != rwind) { - if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) - rwind = RXRPC_RXTX_BUFF_SIZE - 1; if (rwind > call->tx_winsize) wake = true; - trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, - ntohl(ackinfo->rwind), wake); + trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake); call->tx_winsize = rwind; } diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 2af42c7d5b82a55f106232d8f04896f256574c7a..383292adcac6dc4a2bccb12ce3281ca2fe5343b4 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -134,10 +134,10 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) } switch (local->srx.transport.family) { - case AF_INET: - /* we want to receive ICMP errors */ + case AF_INET6: + /* we want to receive ICMPv6 errors */ opt = 1; - ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR, + ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR, (char *) &opt, sizeof(opt)); if (ret < 0) { _debug("setsockopt failed"); @@ -145,19 +145,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) } /* we want to set the don't fragment bit */ - opt = IP_PMTUDISC_DO; - ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER, + opt = IPV6_PMTUDISC_DO; + ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER, (char *) &opt, sizeof(opt)); if (ret < 0) { _debug("setsockopt failed"); goto error; } - break; - case AF_INET6: + /* Fall through and set IPv4 options too otherwise we don't get + * errors from IPv4 packets sent through the IPv6 socket. 
+ */ + + case AF_INET: /* we want to receive ICMP errors */ opt = 1; - ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR, + ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR, (char *) &opt, sizeof(opt)); if (ret < 0) { _debug("setsockopt failed"); @@ -165,8 +168,8 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) } /* we want to set the don't fragment bit */ - opt = IPV6_PMTUDISC_DO; - ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER, + opt = IP_PMTUDISC_DO; + ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER, (char *) &opt, sizeof(opt)); if (ret < 0) { _debug("setsockopt failed"); diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 77cb23c7bd0a8421c4290dfc5a61d18ff1b82a40..48fad9ba8601df319460d2ca54251aa465114705 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -1111,7 +1111,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key, &expiry, _abort_code); if (ret < 0) - goto temporary_error_free_resp; + goto temporary_error_free_ticket; /* use the session key from inside the ticket to decrypt the * response */ @@ -1193,7 +1193,6 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, temporary_error_free_ticket: kfree(ticket); -temporary_error_free_resp: kfree(response); temporary_error: /* Ignore the response packet if we got a temporary error such as diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index c5fcdf1a58a08824aef5b48c790a019e17134967..9198c9983b8366af6853c75f90033ae9a075c4fa 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -545,15 +545,15 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) if (!p->link.q) p->link.q = &noop_qdisc; pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q); + p->link.vcc = NULL; + p->link.sock = NULL; + p->link.common.classid = sch->handle; + p->link.ref = 1; err = tcf_block_get(&p->link.block, &p->link.filter_list); if (err) return err; - p->link.vcc = NULL; - p->link.sock = NULL; - p->link.common.classid = sch->handle; - p->link.ref = 1; tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch); return 0; } diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 531250fceb9e5a75d6a8b843e5e5fd9d481fddf2..5a98618b47e86a49d865123931616197fc8b92d1 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -327,7 +327,8 @@ static void choke_reset(struct Qdisc *sch) sch->q.qlen = 0; sch->qstats.backlog = 0; - memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); + if (q->tab) + memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); q->head = q->tail = 0; red_restart(&q->vars); } diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index de3b57ceca7bd625c874fbece917c944aabc26d8..4faa631139af088c1ca34a145bb494019a821b89 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -427,7 +427,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt) q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]) - q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); + q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); if (tb[TCA_FQ_CODEL_MEMORY_LIMIT]) q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT])); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 
21b981abbacb575353e86fc7f99123b2f5179bb0..091a9746627fa92cf26256cd0abaaa433cb81a5c 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -341,6 +341,7 @@ void __netdev_watchdog_up(struct net_device *dev) dev_hold(dev); } } +EXPORT_SYMBOL_GPL(__netdev_watchdog_up); static void dev_watchdog_up(struct net_device *dev) { diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index cbc54ddfe076a006e7d78fc8569a6e0cd0b8be85..1eae4de319b3d8e9c1b45d6ceff6b8f9342d97fe 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -639,6 +639,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) if (ctl->divisor && (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536)) return -EINVAL; + + /* slot->allot is a short, make sure quantum is not too big. */ + if (ctl->quantum) { + unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum); + + if (scaled <= 0 || scaled > SHRT_MAX) + return -EINVAL; + } + if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, ctl_v1->Wlog)) return -EINVAL; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index dd1a3bd80be5f0ed5036ac28db238f082f4e0637..0a5764016721b699502c3dc6d6fea32a0353b1a7 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1598,12 +1598,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, enum sctp_scope scope, gfp_t gfp) { + struct sock *sk = asoc->base.sk; int flags; /* Use scoping rules to determine the subset of addresses from * the endpoint. */ - flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; + flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; + if (!inet_v6_ipv6only(sk)) + flags |= SCTP_ADDR4_ALLOWED; if (asoc->peer.ipv4_address) flags |= SCTP_ADDR4_PEERSUPP; if (asoc->peer.ipv6_address) diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index 7df3704982f547eda452cac2131afae81a5c7e9e..38d01cfb313e51776e2206e521a8dc82316aaa18 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -453,6 +453,7 @@ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest, * well as the remote peer. 
*/ if ((((AF_INET == addr->sa.sa_family) && + (flags & SCTP_ADDR4_ALLOWED) && (flags & SCTP_ADDR4_PEERSUPP))) || (((AF_INET6 == addr->sa.sa_family) && (flags & SCTP_ADDR6_ALLOWED) && diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 18efb8cc46932735123fa41e7c3c6b56d0bf9ba7..b61e9ed109f6592c0d56d1545a3ef9f507b8f5cc 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -271,7 +271,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (!asoc || saddr) { t->dst = dst; memcpy(fl, &_fl, sizeof(_fl)); @@ -329,7 +329,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, fl6->saddr = laddr->a.v6.sin6_addr; fl6->fl6_sport = laddr->a.v6.sin6_port; final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); - bdst = ip6_dst_lookup_flow(sk, fl6, final_p); + bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (IS_ERR(bdst)) continue; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 785456df750518aacd7bdc03e1922d1c2a67bfee..8fe9c0646205262a48f6cf50f27f633e08dc33d2 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -213,7 +213,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp, * sock as well as the remote peer. */ if (addr->a.sa.sa_family == AF_INET && - !(copy_flags & SCTP_ADDR4_PEERSUPP)) + (!(copy_flags & SCTP_ADDR4_ALLOWED) || + !(copy_flags & SCTP_ADDR4_PEERSUPP))) continue; if (addr->a.sa.sa_family == AF_INET6 && (!(copy_flags & SCTP_ADDR6_ALLOWED) || diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index ba29d782af30d52c5f45df6b25cbcf4411243c8c..80b9f7f097fa86df6d7fbbb96c0f27fb133143aa 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1829,12 +1829,13 @@ static enum sctp_disposition sctp_sf_do_dupcook_a( /* Update the content of current association. */ sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); - if (sctp_state(asoc, SHUTDOWN_PENDING) && + if ((sctp_state(asoc, SHUTDOWN_PENDING) || + sctp_state(asoc, SHUTDOWN_SENT)) && (sctp_sstate(asoc->base.sk, CLOSING) || sock_flag(asoc->base.sk, SOCK_DEAD))) { - /* if were currently in SHUTDOWN_PENDING, but the socket - * has been closed by user, don't transition to ESTABLISHED. - * Instead trigger SHUTDOWN bundled with COOKIE_ACK. + /* If the socket has been closed by user, don't + * transition to ESTABLISHED. Instead trigger SHUTDOWN + * bundled with COOKIE_ACK. 
*/ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c index 2e0a6f92e563d7942b3bf7fa17b43e3f44dc9355..8391c2785550135d270b1039f5a7b7670aa05ef7 100644 --- a/net/sunrpc/addr.c +++ b/net/sunrpc/addr.c @@ -81,11 +81,11 @@ static size_t rpc_ntop6(const struct sockaddr *sap, rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", IPV6_SCOPE_DELIMITER, sin6->sin6_scope_id); - if (unlikely((size_t)rc > sizeof(scopebuf))) + if (unlikely((size_t)rc >= sizeof(scopebuf))) return 0; len += rc; - if (unlikely(len > buflen)) + if (unlikely(len >= buflen)) return 0; strcat(buf, scopebuf); diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 5fec3abbe19bb640de31bf85bfc9becdeab5f359..c7d88f979c5668a2073203f3f158ef8075928088 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -61,6 +61,8 @@ gss_mech_free(struct gss_api_mech *gm) for (i = 0; i < gm->gm_pf_num; i++) { pf = &gm->gm_pfs[i]; + if (pf->domain) + auth_domain_put(pf->domain); kfree(pf->auth_domain_name); pf->auth_domain_name = NULL; } @@ -83,6 +85,7 @@ make_auth_domain_name(char *name) static int gss_mech_svc_setup(struct gss_api_mech *gm) { + struct auth_domain *dom; struct pf_desc *pf; int i, status; @@ -92,10 +95,13 @@ gss_mech_svc_setup(struct gss_api_mech *gm) status = -ENOMEM; if (pf->auth_domain_name == NULL) goto out; - status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor, - pf->auth_domain_name); - if (status) + dom = svcauth_gss_register_pseudoflavor( + pf->pseudoflavor, pf->auth_domain_name); + if (IS_ERR(dom)) { + status = PTR_ERR(dom); goto out; + } + pf->domain = dom; } return 0; out: diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index a457e7afb7688c628395f5596d348a640d5f3f5f..03043d5221e926ec162489e464e860aaae7fca13 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -779,7 +779,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom) EXPORT_SYMBOL_GPL(svcauth_gss_flavor); -int +struct auth_domain * svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) { struct gss_domain *new; @@ -796,21 +796,23 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) new->h.flavour = &svcauthops_gss; new->pseudoflavor = pseudoflavor; - stat = 0; test = auth_domain_lookup(name, &new->h); - if (test != &new->h) { /* Duplicate registration */ + if (test != &new->h) { + pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n", + name); + stat = -EADDRINUSE; auth_domain_put(test); - kfree(new->h.name); - goto out_free_dom; + goto out_free_name; } - return 0; + return test; +out_free_name: + kfree(new->h.name); out_free_dom: kfree(new); out: - return stat; + return ERR_PTR(stat); } - EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor); static inline int diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 34f94052c519dcfbb36ae700319f3a5a107b7598..137f92bfafac7355d1b20df0251d8a312b58ad92 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1347,6 +1347,7 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data) q.len = strlen(gssd_dummy_clnt_dir[0].name); clnt_dentry = d_hash_and_lookup(gssd_dentry, &q); if (!clnt_dentry) { + __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1); pipe_dentry = ERR_PTR(-ENOENT); goto out; } diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 
4f382805eb9c0c8632d4691b71b887e33eace2df..87cf0b933f999430b739974fe9b26cb02fe30d99 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1036,6 +1036,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->head[0].iov_len; + subbuf->head[0].iov_base = buf->head[0].iov_base; subbuf->head[0].iov_len = 0; } @@ -1048,6 +1049,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->page_len; + subbuf->pages = buf->pages; + subbuf->page_base = 0; subbuf->page_len = 0; } @@ -1059,6 +1062,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->tail[0].iov_len; + subbuf->tail[0].iov_base = buf->tail[0].iov_base; subbuf->tail[0].iov_len = 0; } diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index de011fdd79645f3dd909b01ee3dd8b7d330a8ef7..4d0eb41efebeaff31dbd58fc8d5d418294e34059 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -187,10 +187,13 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb, .saddr = src->ipv6, .flowi6_proto = IPPROTO_UDP }; - err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst, - &fl6); - if (err) + ndst = ipv6_stub->ipv6_dst_lookup_flow(net, + ub->ubsock->sk, + &fl6, NULL); + if (IS_ERR(ndst)) { + err = PTR_ERR(ndst); goto tx_error; + } ttl = ip6_dst_hoplimit(ndst); err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL, &src->ipv6, &dst->ipv6, 0, ttl, 0, diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 73eac97e19fb16306131dddacd0334786fc99618..f297a427b421b2918ccf46cc1be35bb144146370 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1290,7 +1290,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags, /* Wait for children sockets to appear; these are the new sockets * created upon connection establishment. 
*/ - timeout = sock_sndtimeo(listener, flags & O_NONBLOCK); + timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK); prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); while ((connected = vsock_dequeue_accept(listener)) == NULL && diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 2a4764b0183affad220607ccf088adc357bbeab8..af87815b1bb3cc194ac8afe115325e9904c98532 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -407,7 +407,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) dev_put(skb->dev); spin_lock(&x->lock); - if (nexthdr <= 0) { + if (nexthdr < 0) { if (nexthdr == -EBADMSG) { xfrm_audit_state_icvfail(x, skb, x->type->proto); diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 97b191c3ede0770b4bad1d98edaa6272701cdff0..00ff2a1c5e5fe96a2fe9a6d5b707f8b811545720 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -235,18 +235,20 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb) xfrm_state_hold(x); if (skb_is_gso(skb)) { - skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; + if (skb->inner_protocol) + return xfrm_output_gso(net, sk, skb); - return xfrm_output2(net, sk, skb); + skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; + goto out; } if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM) goto out; + } else { + if (skb_is_gso(skb)) + return xfrm_output_gso(net, sk, skb); } - if (skb_is_gso(skb)) - return xfrm_output_gso(net, sk, skb); - if (skb->ip_summed == CHECKSUM_PARTIAL) { err = skb_checksum_help(skb); if (err) { @@ -283,7 +285,8 @@ void xfrm_local_error(struct sk_buff *skb, int mtu) if (skb->protocol == htons(ETH_P_IP)) proto = AF_INET; - else if (skb->protocol == htons(ETH_P_IPV6)) + else if (skb->protocol == htons(ETH_P_IPV6) && + skb->sk->sk_family == AF_INET6) proto = AF_INET6; else return; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 5f5ad0dff5a40d8ba5b6a243c0de35cfb3ce82d7..b0811d71eb43cff1ce3c7746a6f2243876fae44f 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -730,12 +730,7 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, static bool xfrm_policy_mark_match(struct xfrm_policy *policy, struct xfrm_policy *pol) { - u32 mark = policy->mark.v & policy->mark.m; - - if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m) - return true; - - if ((mark & pol->mark.m) == pol->mark.v && + if (policy->mark.v == pol->mark.v && policy->priority == pol->priority) return true; diff --git a/samples/bpf/lwt_len_hist_user.c b/samples/bpf/lwt_len_hist_user.c index 7fcb94c09112e97eec0d276a7fb6fb0c5434f704..965108527a4f1f1c6f2b46f5b37a4327b3ea1926 100644 --- a/samples/bpf/lwt_len_hist_user.c +++ b/samples/bpf/lwt_len_hist_user.c @@ -15,8 +15,6 @@ #define MAX_INDEX 64 #define MAX_STARS 38 -char bpf_log_buf[BPF_LOG_BUF_SIZE]; - static void stars(char *str, long val, long max, int width) { int i; diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 08969e192c388f94bb3013d12a785bb6c579f34a..77274ab0c2d267e2170b51c913ebcd90c257e3f0 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -82,20 +82,21 @@ cc-cross-prefix = \ fi))) # output directory for tests below -TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/) +TMPOUT = $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_$$$$ # try-run # Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise) # Exit code chooses option. "$$TMP" serves as a temporary file and is # automatically cleaned up. 
try-run = $(shell set -e; \ - TMP="$(TMPOUT).$$$$.tmp"; \ - TMPO="$(TMPOUT).$$$$.o"; \ + TMP=$(TMPOUT)/tmp; \ + TMPO=$(TMPOUT)/tmp.o; \ + mkdir -p $(TMPOUT); \ + trap "rm -rf $(TMPOUT)" EXIT; \ if ($(1)) >/dev/null 2>&1; \ then echo "$(2)"; \ else echo "$(3)"; \ - fi; \ - rm -f "$$TMP" "$$TMPO") + fi) # as-option # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,) diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 446540515adb73a66d613462900a2b0d67febdca..dcacdc946013f2b97f07cc3a16911905ac0666ac 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -372,7 +372,8 @@ cmd_lzo = (cat $(filter-out FORCE,$^) | \ quiet_cmd_lz4 = LZ4 $@ cmd_lz4 = (cat $(filter-out FORCE,$^) | \ - lz4c -l -c1 stdin stdout && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ + lz4 -l -12 --favor-decSpeed stdin stdout && \ + $(call size_append, $(filter-out FORCE,$^))) > $@ || \ (rm -f $@ ; false) # U-Boot mkimage diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh index 5aa75a0a1cede6fce64169c5dec9080c89d79197..946735bd5a252652b3a39e834c685e7c9f5d1a5d 100755 --- a/scripts/decode_stacktrace.sh +++ b/scripts/decode_stacktrace.sh @@ -77,8 +77,8 @@ parse_symbol() { return fi - # Strip out the base of the path - code=${code#$basepath/} + # Strip out the base of the path on each line + code=$(while read -r line; do echo "${line#$basepath/}"; done <<< "$code") # In the case of inlines, move everything to same line code=${code//$'\n'/' '} diff --git a/scripts/decodecode b/scripts/decodecode index 438120da1361002833a2014b1ca3fb02cc0aa539..1ab4ef613cb078f709e9cb75d775a38a25cac397 100755 --- a/scripts/decodecode +++ b/scripts/decodecode @@ -99,7 +99,7 @@ faultlinenum=$(( $(wc -l $T.oo | cut -d" " -f1) - \ faultline=`cat $T.dis | head -1 | cut -d":" -f2-` faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'` -cat $T.oo | sed -e "${faultlinenum}s/^\(.*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/" +cat $T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/" echo cat $T.aa cleanup diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile index ea465799ced590a3f9653500642b71d81a6d353e..8c05b54949a0bcde1f4a6181b59f85cfd461317c 100644 --- a/scripts/gcc-plugins/Makefile +++ b/scripts/gcc-plugins/Makefile @@ -10,6 +10,7 @@ else HOST_EXTRACXXFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti HOST_EXTRACXXFLAGS += -fno-exceptions -fasynchronous-unwind-tables -ggdb HOST_EXTRACXXFLAGS += -Wno-narrowing -Wno-unused-variable + HOST_EXTRACXXFLAGS += -Wno-format-diag export HOST_EXTRACXXFLAGS endif diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h index 797e3786b415f42ea6f31cf1ecdc04b3312b3ac4..01312b1d6294f5789b2fd8dee13191dcd8697d1b 100644 --- a/scripts/gcc-plugins/gcc-common.h +++ b/scripts/gcc-plugins/gcc-common.h @@ -35,7 +35,9 @@ #include "ggc.h" #include "timevar.h" +#if BUILDING_GCC_VERSION < 10000 #include "params.h" +#endif #if BUILDING_GCC_VERSION <= 4009 #include "pointer-set.h" @@ -841,6 +843,7 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT); } +#if BUILDING_GCC_VERSION < 10000 template <> template <> inline bool is_a_helper::test(const_gimple gs) @@ -854,6 +857,7 @@ inline bool is_a_helper::test(const_gimple gs) { return gs->code == GIMPLE_RETURN; } +#endif static inline gasm *as_a_gasm(gimple stmt) { diff --git a/scripts/mksysmap b/scripts/mksysmap index 
a35acc0d0b827fa056348a76d074d5ef0a3dae93..9aa23d15862a02b0ae86646b0cb6d7f84f13c5f8 100755 --- a/scripts/mksysmap +++ b/scripts/mksysmap @@ -41,4 +41,4 @@ # so we just ignore them to let readprofile continue to work. # (At least sparc64 has __crc_ in the middle). -$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( .L\)' > $2 +$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)' > $2 diff --git a/scripts/module-lto.lds.S b/scripts/module-lto.lds.S index c0f4fdeb84a08334f8ff413ddc833fc2c0bfc6ca..a89f0c57b36f15ad8995ab0c72f9e6bee744533d 100644 --- a/scripts/module-lto.lds.S +++ b/scripts/module-lto.lds.S @@ -15,12 +15,20 @@ SECTIONS { *(.eh_frame) } - .bss : { *(.bss .bss.[0-9a-zA-Z_]*) } - .data : { *(.data .data.[0-9a-zA-Z_]*) } - .rela.data : { *(.rela.data .rela.data.[0-9a-zA-Z_]*) } - .rela.rodata : { *(.rela.rodata .rela.rodata.[0-9a-zA-Z_]*) } - .rela.text : { *(.rela.text .rela.text.[0-9a-zA-Z_]*) } - .rodata : { *(.rodata .rodata.[0-9a-zA-Z_]*) } + .bss : { + *(.bss .bss.[0-9a-zA-Z_]*) + *(.bss..L*) + } + + .data : { + *(.data .data.[0-9a-zA-Z_]*) + *(.data..L*) + } + + .rodata : { + *(.rodata .rodata.[0-9a-zA-Z_]*) + *(.rodata..L*) + } /* * With CFI_CLANG, ensure __cfi_check is at the beginning of the @@ -30,5 +38,4 @@ SECTIONS { *(.text.__cfi_check) *(.text .text.[0-9a-zA-Z_]* .text..L.cfi*) } - } diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index c106988c1b254eb1c8400378599c398c59300e63..5341d8e52a2b3dc9bd4cf3af1ebe434ffd501a97 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -426,7 +426,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size, */ error = aa_may_manage_policy(label, ns, mask); if (error) - return error; + goto end_section; data = aa_simple_write_to_buffer(buf, size, size, pos); error = PTR_ERR(data); @@ -434,6 +434,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size, error = aa_replace_profiles(ns, label, mask, data); aa_put_loaddata(data); } +end_section: end_current_label_crit_section(label); return error; diff --git a/security/apparmor/label.c b/security/apparmor/label.c index ea63710442ae58a92a4c38a0f6fa2dc3dfb20116..212a0f39ddae8e4c1535562d3cc9a8f58c0cc392 100644 --- a/security/apparmor/label.c +++ b/security/apparmor/label.c @@ -1536,13 +1536,13 @@ static const char *label_modename(struct aa_ns *ns, struct aa_label *label, label_for_each(i, label, profile) { if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) { - if (profile->mode == APPARMOR_UNCONFINED) + count++; + if (profile == profile->ns->unconfined) /* special case unconfined so stacks with * unconfined don't report as mixed. ie. 
* profile_foo//&:ns1:unconfined (mixed) */ continue; - count++; if (mode == -1) mode = profile->mode; else if (mode != profile->mode) diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index c58df3375390d1fa04f3c6b6991ac014eff85026..b9dcf7ec95a01b572b506db090bc0b3597847055 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -123,11 +123,11 @@ static int apparmor_ptrace_traceme(struct task_struct *parent) struct aa_label *tracer, *tracee; int error; - tracee = begin_current_label_crit_section(); + tracee = __begin_current_label_crit_section(); tracer = aa_get_task_label(parent); error = aa_may_ptrace(tracer, tracee, AA_PTRACE_TRACE); aa_put_label(tracer); - end_current_label_crit_section(tracee); + __end_current_label_crit_section(tracee); return error; } diff --git a/security/commoncap.c b/security/commoncap.c index 807d8010d095e25ab46a4b708010a8b42e455294..705375e315bf8f13a2714b59967c096aecd6f75f 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -723,6 +723,7 @@ int cap_bprm_set_creds(struct linux_binprm *bprm) int ret; kuid_t root_uid; + new->cap_ambient = old->cap_ambient; if (WARN_ON(!cap_ambient_invariant_ok(old))) return -EPERM; diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index f1f030ae363baf024599ca7beb557f8cde6a9625..d5843cfa83e7c51936b101eab1de6c37303c3784 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c @@ -90,7 +90,7 @@ static struct shash_desc *init_desc(char type) algo = evm_hash; } - if (*tfm == NULL) { + if (IS_ERR_OR_NULL(*tfm)) { mutex_lock(&mutex); if (*tfm) goto out; @@ -240,7 +240,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, /* Portable EVM signatures must include an IMA hash */ if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present) - return -EPERM; + error = -EPERM; out: kfree(xattr_value); kfree(desc); diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index d52b487ad25955fb553813746e0d0c485434221f..186a3158edef3b02f3416e0073ccfa2a16da7bf9 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -40,7 +40,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; #define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE #define IMA_EVENT_NAME_LEN_MAX 255 -#define IMA_HASH_BITS 9 +#define IMA_HASH_BITS 10 #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) #define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16 @@ -167,9 +167,10 @@ struct ima_h_table { }; extern struct ima_h_table ima_htable; -static inline unsigned long ima_hash_key(u8 *digest) +static inline unsigned int ima_hash_key(u8 *digest) { - return hash_long(*digest, IMA_HASH_BITS); + /* there is no point in taking a hash of part of a digest */ + return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE; } #define __ima_hooks(hook) \ diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index 06b0ee75f34fbeb9b265abdf6b52061e2a42fc87..7b16e54f01c6049286bc8007b788c5ef43aa5b28 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c @@ -432,7 +432,7 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) loff_t i_size; int rc; struct file *f = file; - bool new_file_instance = false, modified_flags = false; + bool new_file_instance = false, modified_mode = false; /* * For consistency, fail file's opened with the O_DIRECT flag on @@ -452,13 +452,13 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) f = dentry_open(&file->f_path, flags, file->f_cred); if 
(IS_ERR(f)) { /* - * Cannot open the file again, lets modify f_flags + * Cannot open the file again, lets modify f_mode * of original and continue */ pr_info_ratelimited("Unable to reopen file for reading.\n"); f = file; - f->f_flags |= FMODE_READ; - modified_flags = true; + f->f_mode |= FMODE_READ; + modified_mode = true; } else { new_file_instance = true; } @@ -476,8 +476,8 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) out: if (new_file_instance) fput(f); - else if (modified_flags) - f->f_flags &= ~FMODE_READ; + else if (modified_mode) + f->f_mode &= ~FMODE_READ; return rc; } diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c index 2c4e83f6409e3f3ff4fd2fe1be98334addddbd41..d37f9ac46670dd287b011cdfd42c9a76d823d5c5 100644 --- a/security/integrity/ima/ima_fs.c +++ b/security/integrity/ima/ima_fs.c @@ -340,8 +340,7 @@ static ssize_t ima_write_policy(struct file *file, const char __user *buf, integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL, "policy_update", "signed policy required", 1, 0); - if (ima_appraise & IMA_APPRAISE_ENFORCE) - result = -EACCES; + result = -EACCES; } else { result = ima_parse_add_rule(data); } diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 4c160bee67f78548f0d9085b2d5efd6330bfc305..46b0bd6b3d620de58a9fc0080f1fea0bab5b7509 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -170,7 +170,7 @@ static struct ima_rule_entry secure_boot_rules[] __ro_after_init = { static LIST_HEAD(ima_default_rules); static LIST_HEAD(ima_policy_rules); static LIST_HEAD(ima_temp_rules); -static struct list_head *ima_rules; +static struct list_head *ima_rules = &ima_default_rules; static int ima_policy __initdata; @@ -468,7 +468,6 @@ void __init ima_init_policy(void) temp_ima_appraise |= IMA_APPRAISE_POLICY; } - ima_rules = &ima_default_rules; ima_update_policy_flag(); } diff --git a/security/keys/internal.h b/security/keys/internal.h index 7ed723d858499ea9cbe0a580e3fa2ea459f400f3..c40fbef50f29719a961cb0cdcd7a3299e2db4484 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -304,15 +304,4 @@ static inline void key_check(const struct key *key) #define key_check(key) do {} while(0) #endif - -/* - * Helper function to clear and free a kvmalloc'ed memory object. 
- */ -static inline void __kvzfree(const void *addr, size_t len) -{ - if (addr) { - memset((void *)addr, 0, len); - kvfree(addr); - } -} #endif /* _INTERNAL_H */ diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index c07c2e2b2478399328d3d8b445adc2d837079257..9394d72a77e80a9115aa60e6ba374f7bb1bdf339 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -133,10 +133,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type, key_ref_put(keyring_ref); error3: - if (payload) { - memzero_explicit(payload, plen); - kvfree(payload); - } + kvfree_sensitive(payload, plen); error2: kfree(description); error: @@ -351,7 +348,7 @@ long keyctl_update_key(key_serial_t id, key_ref_put(key_ref); error2: - __kvzfree(payload, plen); + kvfree_sensitive(payload, plen); error: return ret; } @@ -859,7 +856,7 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) */ if (ret > key_data_len) { if (unlikely(key_data)) - __kvzfree(key_data, key_data_len); + kvfree_sensitive(key_data, key_data_len); key_data_len = ret; continue; /* Allocate buffer */ } @@ -868,7 +865,7 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) ret = -EFAULT; break; } - __kvzfree(key_data, key_data_len); + kvfree_sensitive(key_data, key_data_len); key_put_out: key_put(key); @@ -1170,10 +1167,7 @@ long keyctl_instantiate_key_common(key_serial_t id, keyctl_change_reqkey_auth(NULL); error2: - if (payload) { - memzero_explicit(payload, plen); - kvfree(payload); - } + kvfree_sensitive(payload, plen); error: return ret; } diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 98c418060032bf0b008190db28db99019efe330a..289a9f5672a4ea9d92d71652a9150891110ac6bc 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -2877,8 +2877,12 @@ int security_get_bools(struct selinux_state *state, if (*names) { for (i = 0; i < *len; i++) kfree((*names)[i]); + kfree(*names); } kfree(*values); + *len = 0; + *names = NULL; + *values = NULL; goto out; } diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index f6482e53d55a82d97882d40d04736247281a4239..371ae368da3555204ea12f7965b077ddce91bc03 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -906,11 +906,21 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, else rule += strlen(skp->smk_known) + 1; + if (rule > data + count) { + rc = -EOVERFLOW; + goto out; + } + ret = sscanf(rule, "%d", &maplevel); if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL) goto out; rule += SMK_DIGITLEN; + if (rule > data + count) { + rc = -EOVERFLOW; + goto out; + } + ret = sscanf(rule, "%d", &catlen); if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM) goto out; diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index 77c7e91112f983062984b16f0c3e6f82a243f1ed..9296eb6c4ce8ae96ac49a2b4d77cc05ee1d047f4 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c @@ -729,6 +729,9 @@ static int snd_compr_stop(struct snd_compr_stream *stream) if (!retval) { stream->runtime->state = SNDRV_PCM_STATE_SETUP; wake_up(&stream->runtime->sleep); + /* clear flags and stop any drain wait */ + stream->partial_drain = false; + stream->metadata_set = false; stream->runtime->total_bytes_available = 0; stream->runtime->total_bytes_transferred = 0; } @@ -864,6 +867,7 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream) if (stream->next_track == false) return -EPERM; + stream->partial_drain = true; retval = 
stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN); stream->next_track = false; diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c index a73baa1242beeaf6c714f4c0986fd057286d50d6..727219f4020113511de237593e32789edee53b1b 100644 --- a/sound/core/hwdep.c +++ b/sound/core/hwdep.c @@ -229,14 +229,14 @@ static int snd_hwdep_dsp_load(struct snd_hwdep *hw, if (copy_from_user(&info, _info, sizeof(info))) return -EFAULT; /* check whether the dsp was already loaded */ - if (hw->dsp_loaded & (1 << info.index)) + if (hw->dsp_loaded & (1u << info.index)) return -EBUSY; if (!access_ok(VERIFY_READ, info.image, info.length)) return -EFAULT; err = hw->ops.dsp_load(hw, &info); if (err < 0) return err; - hw->dsp_loaded |= (1 << info.index); + hw->dsp_loaded |= (1u << info.index); return 0; } diff --git a/sound/core/info.c b/sound/core/info.c index f895fa3303b72d1f710c8755d85f5359693bafc9..d6dc7de0831fc9fef3790617f9e25b994504cd26 100644 --- a/sound/core/info.c +++ b/sound/core/info.c @@ -645,7 +645,9 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len) { int c = -1; - if (snd_BUG_ON(!buffer || !buffer->buffer)) + if (snd_BUG_ON(!buffer)) + return 1; + if (!buffer->buffer) return 1; if (len <= 0 || buffer->stop || buffer->error) return 1; diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 33fb05363a163e1f625dc383b98c5a5aadf89bf9..6461bc8708f360d1a4ac897be1f99eb011ee57b4 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -444,6 +444,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, no_delta_check: if (runtime->status->hw_ptr == new_hw_ptr) { + runtime->hw_ptr_jiffies = curr_jiffies; update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp); return 0; } diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 93da68ba9b8193f458b7a6c6d115bc2a66e5e789..dc582c29abf70c0189033750bd5732c0d142a98b 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -1978,6 +1978,11 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) } pcm_file = f.file->private_data; substream1 = pcm_file->substream; + if (substream == substream1) { + res = -EINVAL; + goto _badf; + } + group = kmalloc(sizeof(*group), GFP_KERNEL); if (!group) { res = -ENOMEM; diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index 22faa6f8df86f1b083565883d84d1a8f1cb52b85..336c507b9c24f166f2234c70e276ee5aed1679a4 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -108,6 +108,17 @@ static void snd_rawmidi_input_event_work(struct work_struct *work) runtime->event(runtime->substream); } +/* buffer refcount management: call with runtime->lock held */ +static inline void snd_rawmidi_buffer_ref(struct snd_rawmidi_runtime *runtime) +{ + runtime->buffer_ref++; +} + +static inline void snd_rawmidi_buffer_unref(struct snd_rawmidi_runtime *runtime) +{ + runtime->buffer_ref--; +} + static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream) { struct snd_rawmidi_runtime *runtime; @@ -126,7 +137,7 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream) runtime->avail = 0; else runtime->avail = runtime->buffer_size; - if ((runtime->buffer = kmalloc(runtime->buffer_size, GFP_KERNEL)) == NULL) { + if ((runtime->buffer = kzalloc(runtime->buffer_size, GFP_KERNEL)) == NULL) { kfree(runtime); return -ENOMEM; } @@ -661,6 +672,11 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream, return -ENOMEM; } spin_lock_irqsave(&runtime->lock, flags); + if (runtime->buffer_ref) { + 
spin_unlock_irq(&runtime->lock); + kfree(newbuf); + return -EBUSY; + } oldbuf = runtime->buffer; runtime->buffer = newbuf; runtime->buffer_size = params->buffer_size; @@ -977,10 +993,12 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream, long result = 0, count1; struct snd_rawmidi_runtime *runtime = substream->runtime; unsigned long appl_ptr; + int err = 0; if (userbuf) mutex_lock(&runtime->realloc_mutex); spin_lock_irqsave(&runtime->lock, flags); + snd_rawmidi_buffer_ref(runtime); while (count > 0 && runtime->avail) { count1 = runtime->buffer_size - runtime->appl_ptr; if (count1 > count) @@ -1001,17 +1019,21 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream, if (copy_to_user(userbuf + result, runtime->buffer + appl_ptr, count1)) { mutex_unlock(&runtime->realloc_mutex); - return result > 0 ? result : -EFAULT; + err = -EFAULT; } spin_lock_irqsave(&runtime->lock, flags); + if (err) + goto out; } result += count1; count -= count1; } + out: + snd_rawmidi_buffer_unref(runtime); spin_unlock_irqrestore(&runtime->lock, flags); if (userbuf) mutex_unlock(&runtime->realloc_mutex); - return result; + return result > 0 ? result : err; } long snd_rawmidi_kernel_read(struct snd_rawmidi_substream *substream, @@ -1286,6 +1308,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream, return -EAGAIN; } } + snd_rawmidi_buffer_ref(runtime); while (count > 0 && runtime->avail > 0) { count1 = runtime->buffer_size - runtime->appl_ptr; if (count1 > count) @@ -1317,6 +1340,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream, } __end: count1 = runtime->avail < runtime->buffer_size; + snd_rawmidi_buffer_unref(runtime); spin_unlock_irqrestore(&runtime->lock, flags); if (userbuf) mutex_unlock(&runtime->realloc_mutex); diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c index 42920a2433282befccd62aaa43bbec949fe22e6c..3f94746d587aeb38bb67236bef7bc9ec7b061c0d 100644 --- a/sound/drivers/opl3/opl3_synth.c +++ b/sound/drivers/opl3/opl3_synth.c @@ -104,6 +104,8 @@ int snd_opl3_ioctl(struct snd_hwdep * hw, struct file *file, { struct snd_dm_fm_info info; + memset(&info, 0, sizeof(info)); + info.fm_mode = opl3->fm_mode; info.rhythm = opl3->rhythm; if (copy_to_user(argp, &info, sizeof(struct snd_dm_fm_info))) diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c index a826c138e7f555ec3a0e3345265f646bc3ff135e..8a58ed1687562d3aa9ceb5292184f3ed81ce8dde 100644 --- a/sound/isa/es1688/es1688.c +++ b/sound/isa/es1688/es1688.c @@ -284,8 +284,10 @@ static int snd_es968_pnp_detect(struct pnp_card_link *pcard, return error; } error = snd_es1688_probe(card, dev); - if (error < 0) + if (error < 0) { + snd_card_free(card); return error; + } pnp_set_card_drvdata(pcard, card); snd_es968_pnp_is_probed = 1; return 0; diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c index 0b1e4b34b29965360843fd26994a034530b54725..13c8e6542a2fc8089f1e7d2e5251818ce4e04e78 100644 --- a/sound/isa/wavefront/wavefront_synth.c +++ b/sound/isa/wavefront/wavefront_synth.c @@ -1175,7 +1175,10 @@ wavefront_send_alias (snd_wavefront_t *dev, wavefront_patch_info *header) "alias for %d\n", header->number, header->hdr.a.OriginalSample); - + + if (header->number >= WF_MAX_SAMPLE) + return -EINVAL; + munge_int32 (header->number, &alias_hdr[0], 2); munge_int32 (header->hdr.a.OriginalSample, &alias_hdr[2], 2); munge_int32 (*((unsigned int *)&header->hdr.a.sampleStartOffset), @@ -1206,6 
+1209,9 @@ wavefront_send_multisample (snd_wavefront_t *dev, wavefront_patch_info *header) int num_samples; unsigned char *msample_hdr; + if (header->number >= WF_MAX_SAMPLE) + return -EINVAL; + msample_hdr = kmalloc(WF_MSAMPLE_BYTES, GFP_KERNEL); if (! msample_hdr) return -ENOMEM; diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c index 8b1cf237b96ec4c4458f1471b250e071d0af1f30..c5dc8587d2ac9a70e5810bd7f0dd35208ced4eed 100644 --- a/sound/pci/hda/hda_auto_parser.c +++ b/sound/pci/hda/hda_auto_parser.c @@ -76,6 +76,12 @@ static int compare_input_type(const void *ap, const void *bp) if (a->type != b->type) return (int)(a->type - b->type); + /* If has both hs_mic and hp_mic, pick the hs_mic ahead of hp_mic. */ + if (a->is_headset_mic && b->is_headphone_mic) + return -1; /* don't swap */ + else if (a->is_headphone_mic && b->is_headset_mic) + return 1; /* swap */ + /* In case one has boost and the other one has not, pick the one with boost first. */ return (int)(b->has_boost_on_pin - a->has_boost_on_pin); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 7779f54607156d7c5e36a412fe3752b72cfe1f63..e399c5718ee605c32640d83ab2a891163f197445 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1282,6 +1282,7 @@ static void azx_vs_set_state(struct pci_dev *pci, struct snd_card *card = pci_get_drvdata(pci); struct azx *chip = card->private_data; struct hda_intel *hda = container_of(chip, struct hda_intel, chip); + struct hda_codec *codec; bool disabled; wait_for_completion(&hda->probe_wait); @@ -1306,8 +1307,12 @@ static void azx_vs_set_state(struct pci_dev *pci, dev_info(chip->card->dev, "%s via vga_switcheroo\n", disabled ? "Disabling" : "Enabling"); if (disabled) { - pm_runtime_put_sync_suspend(card->dev); - azx_suspend(card->dev); + list_for_each_codec(codec, &chip->bus) { + pm_runtime_suspend(hda_codec_dev(codec)); + pm_runtime_disable(hda_codec_dev(codec)); + } + pm_runtime_suspend(card->dev); + pm_runtime_disable(card->dev); /* when we get suspended by vga_switcheroo we end up in D3cold, * however we have no ACPI handle, so pci/acpi can't put us there, * put ourselves there */ @@ -1318,9 +1323,12 @@ static void azx_vs_set_state(struct pci_dev *pci, "Cannot lock devices!\n"); } else { snd_hda_unlock_devices(&chip->bus); - pm_runtime_get_noresume(card->dev); chip->disabled = false; - azx_resume(card->dev); + pm_runtime_enable(card->dev); + list_for_each_codec(codec, &chip->bus) { + pm_runtime_enable(hda_codec_dev(codec)); + pm_runtime_resume(hda_codec_dev(codec)); + } } } } @@ -1350,6 +1358,7 @@ static void init_vga_switcheroo(struct azx *chip) dev_info(chip->card->dev, "Handle vga_switcheroo audio client\n"); hda->use_vga_switcheroo = 1; + chip->driver_caps |= AZX_DCAPS_PM_RUNTIME; pci_dev_put(p); } } @@ -1375,9 +1384,6 @@ static int register_vga_switcheroo(struct azx *chip) return err; hda->vga_switcheroo_registered = 1; - /* register as an optimus hdmi audio power domain */ - vga_switcheroo_init_domain_pm_optimus_hdmi_audio(chip->card->dev, - &hda->hdmi_pm_domain); return 0; } #else @@ -1406,10 +1412,8 @@ static int azx_free(struct azx *chip) if (use_vga_switcheroo(hda)) { if (chip->disabled && hda->probe_continued) snd_hda_unlock_devices(&chip->bus); - if (hda->vga_switcheroo_registered) { + if (hda->vga_switcheroo_registered) vga_switcheroo_unregister_client(chip->pci); - vga_switcheroo_fini_domain_pm_ops(chip->card->dev); - } } if (bus->chip_init) { @@ -2301,6 +2305,7 @@ static int azx_probe_continue(struct azx *chip) 
struct hda_intel *hda = container_of(chip, struct hda_intel, chip); struct hdac_bus *bus = azx_bus(chip); struct pci_dev *pci = chip->pci; + struct hda_codec *codec; int dev = chip->dev_index; int val; int err; @@ -2385,6 +2390,14 @@ static int azx_probe_continue(struct azx *chip) chip->running = 1; azx_add_card_list(chip); + /* + * The discrete GPU cannot power down unless the HDA controller runtime + * suspends, so activate runtime PM on codecs even if power_save == 0. + */ + if (use_vga_switcheroo(hda)) + list_for_each_codec(codec, &chip->bus) + codec->auto_runtime_pm = 1; + val = power_save; #ifdef CONFIG_PM if (pm_blacklist) { @@ -2399,7 +2412,7 @@ static int azx_probe_continue(struct azx *chip) } #endif /* CONFIG_PM */ snd_hda_set_power_save(&chip->bus, val * 1000); - if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo) + if (azx_has_pm_runtime(chip)) pm_runtime_put_autosuspend(&pci->dev); out_free: diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h index ff0c4d617bc1dbba882b8e1c09d208a71be84bf3..e3a3d318d2e5f9fc57dbe9530a2470ab955b1765 100644 --- a/sound/pci/hda/hda_intel.h +++ b/sound/pci/hda/hda_intel.h @@ -40,9 +40,6 @@ struct hda_intel { unsigned int vga_switcheroo_registered:1; unsigned int init_failed:1; /* delayed init failed */ - /* secondary power domain for hdmi audio under vga device */ - struct dev_pm_domain hdmi_pm_domain; - bool need_i915_power:1; /* the hda controller needs i915 power */ }; diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 435c0efb9bf29205c240e3d4f807a94376fc4215..9e8cfc409b4b326a3de307a1cce09255e05b2602 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -2212,7 +2212,9 @@ static int generic_hdmi_build_controls(struct hda_codec *codec) for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) { struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx); + struct hdmi_eld *pin_eld = &per_pin->sink_eld; + pin_eld->eld_valid = false; hdmi_present_sense(per_pin, 0); } @@ -3859,6 +3861,11 @@ HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index b2aec97414fb86c42a45ce924aa96cf8f8c16317..98110fd65b9bbccac8f137d34a7e722863193382 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -341,6 +341,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0282: case 0x10ec0283: case 0x10ec0286: + case 0x10ec0287: case 0x10ec0288: case 0x10ec0285: case 0x10ec0298: @@ -4691,8 +4692,6 @@ static void alc_determine_headset_type(struct hda_codec *codec) is_ctia = (val & 0x1c02) == 0x1c02; break; case 0x10ec0225: - codec->power_save_node = 1; - /* fall through */ case 0x10ec0295: case 0x10ec0299: alc_process_coef_fw(codec, alc225_pre_hsmode); @@ -5354,6 +5353,15 @@ static void 
alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec, } } +static void alc225_fixup_s3_pop_noise(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + if (action != HDA_FIXUP_ACT_PRE_PROBE) + return; + + codec->power_save_node = 1; +} + /* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */ static void alc274_fixup_bind_dacs(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -5424,6 +5432,7 @@ enum { ALC269_FIXUP_HP_LINE1_MIC1_LED, ALC269_FIXUP_INV_DMIC, ALC269_FIXUP_LENOVO_DOCK, + ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST, ALC269_FIXUP_NO_SHUTUP, ALC286_FIXUP_SONY_MIC_NO_PRESENCE, ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT, @@ -5507,6 +5516,7 @@ enum { ALC233_FIXUP_LENOVO_MULTI_CODECS, ALC294_FIXUP_LENOVO_MIC_LOCATION, ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE, + ALC225_FIXUP_S3_POP_NOISE, ALC700_FIXUP_INTEL_REFERENCE, ALC274_FIXUP_DELL_BIND_DACS, ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, @@ -5716,6 +5726,12 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT }, + [ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc269_fixup_limit_int_mic_boost, + .chained = true, + .chain_id = ALC269_FIXUP_LENOVO_DOCK, + }, [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_pincfg_no_hp_to_lineout, @@ -6339,6 +6355,12 @@ static const struct hda_fixup alc269_fixups[] = { { } }, .chained = true, + .chain_id = ALC225_FIXUP_S3_POP_NOISE + }, + [ALC225_FIXUP_S3_POP_NOISE] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc225_fixup_s3_pop_noise, + .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC }, [ALC700_FIXUP_INTEL_REFERENCE] = { @@ -6596,7 +6618,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE), - SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST), SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK), @@ -6728,6 +6750,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC269_FIXUP_HEADSET_MODE, .name = "headset-mode"}, {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"}, {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"}, + {.id = ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST, .name = "lenovo-dock-limit-boost"}, {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"}, {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, @@ -7108,6 +7131,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { ALC225_STANDARD_PINS, {0x12, 0xb7a60130}, {0x17, 0x90170110}), + SND_HDA_PIN_QUIRK(0x10ec0623, 0x17aa, "Lenovo", ALC283_FIXUP_HEADSET_MIC, + {0x14, 0x01014010}, + {0x17, 0x90170120}, + {0x18, 0x02a11030}, + {0x19, 0x02a1103f}, + {0x21, 0x0221101f}), {} }; @@ -7268,6 +7297,7 @@ static int patch_alc269(struct hda_codec *codec) case 0x10ec0215: case 0x10ec0245: case 0x10ec0285: + case 0x10ec0287: case 0x10ec0289: spec->codec_variant = ALC269_TYPE_ALC215; spec->gen.mixer_nid = 
0; @@ -8367,6 +8397,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0284, "ALC284", patch_alc269), HDA_CODEC_ENTRY(0x10ec0285, "ALC285", patch_alc269), HDA_CODEC_ENTRY(0x10ec0286, "ALC286", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0287, "ALC287", patch_alc269), HDA_CODEC_ENTRY(0x10ec0288, "ALC288", patch_alc269), HDA_CODEC_ENTRY(0x10ec0289, "ALC289", patch_alc269), HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269), diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c index 0e66afa403a37ca25908a0595cca2c32efc2307b..5a7928e1b29ed58bcbb4605b8e299ce1ab7ced47 100644 --- a/sound/pci/ice1712/ice1712.c +++ b/sound/pci/ice1712/ice1712.c @@ -2377,7 +2377,8 @@ static int snd_ice1712_chip_init(struct snd_ice1712 *ice) pci_write_config_byte(ice->pci, 0x61, ice->eeprom.data[ICE_EEP1_ACLINK]); pci_write_config_byte(ice->pci, 0x62, ice->eeprom.data[ICE_EEP1_I2SID]); pci_write_config_byte(ice->pci, 0x63, ice->eeprom.data[ICE_EEP1_SPDIF]); - if (ice->eeprom.subvendor != ICE1712_SUBDEVICE_STDSP24) { + if (ice->eeprom.subvendor != ICE1712_SUBDEVICE_STDSP24 && + ice->eeprom.subvendor != ICE1712_SUBDEVICE_STAUDIO_ADCIII) { ice->gpio.write_mask = ice->eeprom.gpiomask; ice->gpio.direction = ice->eeprom.gpiodir; snd_ice1712_write(ice, ICE1712_IREG_GPIO_WRITE_MASK, diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h index 5ba485cae4e663a57a5f7419a36f0cf5789f5ed9..06d7c0aaeb612a0dcba968c08767c0ae25a7769c 100644 --- a/sound/soc/codecs/rt5670.h +++ b/sound/soc/codecs/rt5670.h @@ -760,7 +760,7 @@ #define RT5670_PWR_VREF2_BIT 4 #define RT5670_PWR_FV2 (0x1 << 3) #define RT5670_PWR_FV2_BIT 3 -#define RT5670_LDO_SEL_MASK (0x3) +#define RT5670_LDO_SEL_MASK (0x7) #define RT5670_LDO_SEL_SFT 0 /* Power Management for Analog 2 (0x64) */ diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index e10e03800cce52d76ca7c6f6ca4b83e73835b2f1..6991718d7c8a2303f6265b89a3798de7bbe508a2 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c @@ -1747,8 +1747,10 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp) PTR_ERR(chan)); return PTR_ERR(chan); } - if (WARN_ON(!chan->device || !chan->device->dev)) + if (WARN_ON(!chan->device || !chan->device->dev)) { + dma_release_channel(chan); return -EINVAL; + } if (chan->device->dev->of_node) ret = of_property_read_string(chan->device->dev->of_node, diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c index e1b97e59275a9ebdc31f85cc7e8aa883b0ad1700..15d7e6da05556e1bb41205cf9b4db6ceb67a19db 100644 --- a/sound/soc/fsl/fsl_asrc_dma.c +++ b/sound/soc/fsl/fsl_asrc_dma.c @@ -243,6 +243,7 @@ static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream, ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be); if (ret) { dev_err(dev, "failed to config DMA channel for Back-End\n"); + dma_release_channel(pair->dma_chan[dir]); return ret; } diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c index 8a2e3bbce3a165515f8def7c83e25e4b905b1e2d..ad16c8310dd386337a79636301289246ca6e7a4d 100644 --- a/sound/soc/rockchip/rockchip_pdm.c +++ b/sound/soc/rockchip/rockchip_pdm.c @@ -478,8 +478,10 @@ static int rockchip_pdm_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } ret = regcache_sync(pdm->regmap); diff --git a/sound/usb/card.c b/sound/usb/card.c index 
c592b6c2bf5bbdc55c60db3edcf8487db1507181..f32fa88c8b16642d82d0163a31a076765ccfa8f2 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -867,9 +867,6 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) if (chip == (void *)-1L) return 0; - chip->autosuspended = !!PMSG_IS_AUTO(message); - if (!chip->autosuspended) - snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); if (!chip->num_suspended_intf++) { list_for_each_entry(as, &chip->pcm_list, list) { snd_pcm_suspend_all(as->pcm); @@ -882,6 +879,11 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) snd_usb_mixer_suspend(mixer); } + if (!PMSG_IS_AUTO(message) && !chip->system_suspend) { + snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); + chip->system_suspend = chip->num_suspended_intf; + } + return 0; } @@ -894,10 +896,11 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) if (chip == (void *)-1L) return 0; - if (--chip->num_suspended_intf) - return 0; atomic_inc(&chip->active); /* avoid autopm */ + if (chip->num_suspended_intf > 1) + goto out; + /* * ALSA leaves material resumption to user space * we just notify and restart the mixers @@ -912,9 +915,12 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) snd_usbmidi_resume(p); } - if (!chip->autosuspended) + out: + if (chip->num_suspended_intf == chip->system_suspend) { snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0); - chip->autosuspended = 0; + chip->system_suspend = 0; + } + chip->num_suspended_intf--; err_out: atomic_dec(&chip->active); /* allow autopm after this point */ diff --git a/sound/usb/line6/capture.c b/sound/usb/line6/capture.c index 7c812565f90db03f5c11fbcf3f92854b0eb3e772..a65a82d5791d52e58728f78968dbd15a5e9d77a3 100644 --- a/sound/usb/line6/capture.c +++ b/sound/usb/line6/capture.c @@ -291,6 +291,8 @@ int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm) urb->interval = LINE6_ISO_INTERVAL; urb->error_count = 0; urb->complete = audio_in_callback; + if (usb_urb_ep_type_check(urb)) + return -EINVAL; } return 0; diff --git a/sound/usb/line6/playback.c b/sound/usb/line6/playback.c index 812d18191e018abc588c5a18306d413181e3af04..1736eb3ee98e5e8da3db266f8b4639a716159074 100644 --- a/sound/usb/line6/playback.c +++ b/sound/usb/line6/playback.c @@ -436,6 +436,8 @@ int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm) urb->interval = LINE6_ISO_INTERVAL; urb->error_count = 0; urb->complete = audio_out_callback; + if (usb_urb_ep_type_check(urb)) + return -EINVAL; } return 0; diff --git a/sound/usb/midi.c b/sound/usb/midi.c index a92e2b2a91ecf54b2fdbfd4e1e6636e83fef778c..1bfae7a1c32f1475f091b1570b4d6d2a4066ef5b 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -1477,6 +1477,8 @@ void snd_usbmidi_disconnect(struct list_head *p) spin_unlock_irq(&umidi->disc_lock); up_write(&umidi->disc_rwsem); + del_timer_sync(&umidi->error_timer); + for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i]; if (ep->out) @@ -1503,7 +1505,6 @@ void snd_usbmidi_disconnect(struct list_head *p) ep->in = NULL; } } - del_timer_sync(&umidi->error_timer); } EXPORT_SYMBOL(snd_usbmidi_disconnect); @@ -2260,16 +2261,22 @@ void snd_usbmidi_input_stop(struct list_head *p) } EXPORT_SYMBOL(snd_usbmidi_input_stop); -static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint *ep) +static void snd_usbmidi_input_start_ep(struct snd_usb_midi *umidi, + struct snd_usb_midi_in_endpoint *ep) { unsigned int i; + 
unsigned long flags; if (!ep) return; for (i = 0; i < INPUT_URBS; ++i) { struct urb *urb = ep->urbs[i]; - urb->dev = ep->umidi->dev; - snd_usbmidi_submit_urb(urb, GFP_KERNEL); + spin_lock_irqsave(&umidi->disc_lock, flags); + if (!atomic_read(&urb->use_count)) { + urb->dev = ep->umidi->dev; + snd_usbmidi_submit_urb(urb, GFP_ATOMIC); + } + spin_unlock_irqrestore(&umidi->disc_lock, flags); } } @@ -2285,7 +2292,7 @@ void snd_usbmidi_input_start(struct list_head *p) if (umidi->input_running || !umidi->opened[1]) return; for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) - snd_usbmidi_input_start_ep(umidi->endpoints[i].in); + snd_usbmidi_input_start_ep(umidi, umidi->endpoints[i].in); umidi->input_running = 1; } EXPORT_SYMBOL(snd_usbmidi_input_start); diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 211c3648a45d18d29e363b74cb79b1439798f8f6..20f32c260fee4e1d4de07902789799136e1962e4 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -603,8 +603,9 @@ static int check_matrix_bitmap(unsigned char *bmap, * if failed, give up and free the control instance. */ -int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, - struct snd_kcontrol *kctl) +int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list, + struct snd_kcontrol *kctl, + bool is_std_info) { struct usb_mixer_interface *mixer = list->mixer; int err; @@ -617,6 +618,7 @@ int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, return err; } list->kctl = kctl; + list->is_std_info = is_std_info; list->next_id_elem = mixer->id_elems[list->id]; mixer->id_elems[list->id] = list; return 0; @@ -1056,6 +1058,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, cval->res = 384; } break; + case USB_ID(0x0495, 0x3042): /* ESS Technology Asus USB DAC */ + if ((strstr(kctl->id.name, "Playback Volume") != NULL) || + strstr(kctl->id.name, "Capture Volume") != NULL) { + cval->min >>= 8; + cval->max = 0; + cval->res = 1; + } + break; } } @@ -2742,15 +2752,23 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid) { struct usb_mixer_elem_list *list; - for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) + for_each_mixer_elem(list, mixer, unitid) { + struct usb_mixer_elem_info *info; + + if (!list->is_std_info) + continue; + info = mixer_elem_list_to_info(list); + /* invalidate cache, so the value is read from the device */ + info->cached = 0; snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &list->kctl->id); + } } static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer, struct usb_mixer_elem_list *list) { - struct usb_mixer_elem_info *cval = (struct usb_mixer_elem_info *)list; + struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list); static char *val_types[] = {"BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16"}; snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, " @@ -2776,8 +2794,7 @@ static void snd_usb_mixer_proc_read(struct snd_info_entry *entry, mixer->ignore_ctl_error); snd_iprintf(buffer, "Card: %s\n", chip->card->longname); for (unitid = 0; unitid < MAX_ID_ELEMS; unitid++) { - for (list = mixer->id_elems[unitid]; list; - list = list->next_id_elem) { + for_each_mixer_elem(list, mixer, unitid) { snd_iprintf(buffer, " Unit: %i\n", list->id); if (list->kctl) snd_iprintf(buffer, @@ -2807,19 +2824,21 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, return; } - for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) + for_each_mixer_elem(list, mixer, unitid) count++; if (count == 0) return; - for 
(list = mixer->id_elems[unitid]; list; list = list->next_id_elem) { + for_each_mixer_elem(list, mixer, unitid) { struct usb_mixer_elem_info *info; if (!list->kctl) continue; + if (!list->is_std_info) + continue; - info = (struct usb_mixer_elem_info *)list; + info = mixer_elem_list_to_info(list); if (count > 1 && info->control != control) continue; @@ -3042,7 +3061,7 @@ int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer) static int restore_mixer_value(struct usb_mixer_elem_list *list) { - struct usb_mixer_elem_info *cval = (struct usb_mixer_elem_info *)list; + struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list); int c, err, idx; if (cval->cmask) { @@ -3078,8 +3097,7 @@ int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume) if (reset_resume) { /* restore cached mixer values */ for (id = 0; id < MAX_ID_ELEMS; id++) { - for (list = mixer->id_elems[id]; list; - list = list->next_id_elem) { + for_each_mixer_elem(list, mixer, id) { if (list->resume) { err = list->resume(list); if (err < 0) diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index ba27f7ade670ea009dbb2fc2d0bf50fb581a73c6..7c824a44589b0f93a8a413caa27f93ac3cdc08c0 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h @@ -49,10 +49,17 @@ struct usb_mixer_elem_list { struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */ struct snd_kcontrol *kctl; unsigned int id; + bool is_std_info; usb_mixer_elem_dump_func_t dump; usb_mixer_elem_resume_func_t resume; }; +/* iterate over mixer element list of the given unit id */ +#define for_each_mixer_elem(list, mixer, id) \ + for ((list) = (mixer)->id_elems[id]; (list); (list) = (list)->next_id_elem) +#define mixer_elem_list_to_info(list) \ + container_of(list, struct usb_mixer_elem_info, head) + struct usb_mixer_elem_info { struct usb_mixer_elem_list head; unsigned int control; /* CS or ICN (high byte) */ @@ -80,8 +87,12 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid); int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int value_set); -int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, - struct snd_kcontrol *kctl); +int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list, + struct snd_kcontrol *kctl, + bool is_std_info); + +#define snd_usb_mixer_add_control(list, kctl) \ + snd_usb_mixer_add_list(list, kctl, true) void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list, struct usb_mixer_interface *mixer, diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index b9ea4a42aee4e3cd62cd9a07f913d804c1e80380..5604cce30a582edf6b843309d74bf1bb95e597f5 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -169,7 +169,8 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer, return -ENOMEM; } kctl->private_free = snd_usb_mixer_elem_free; - return snd_usb_mixer_add_control(list, kctl); + /* don't use snd_usb_mixer_add_control() here, this is a special list element */ + return snd_usb_mixer_add_list(list, kctl, false); } /* @@ -1171,7 +1172,7 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip, int unitid = 12; /* SamleRate ExtensionUnit ID */ list_for_each_entry(mixer, &chip->mixer_list, list) { - cval = (struct usb_mixer_elem_info *)mixer->id_elems[unitid]; + cval = mixer_elem_list_to_info(mixer->id_elems[unitid]); if (cval) { snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, cval->control << 8, diff --git a/sound/usb/mixer_scarlett.c b/sound/usb/mixer_scarlett.c index 
c33e2378089d5936ac3466cc9c187fa62bb5a0ce..4aeb9488a0c99dea79ef2337264263a0d3c6f683 100644 --- a/sound/usb/mixer_scarlett.c +++ b/sound/usb/mixer_scarlett.c @@ -287,8 +287,7 @@ static int scarlett_ctl_switch_put(struct snd_kcontrol *kctl, static int scarlett_ctl_resume(struct usb_mixer_elem_list *list) { - struct usb_mixer_elem_info *elem = - container_of(list, struct usb_mixer_elem_info, head); + struct usb_mixer_elem_info *elem = mixer_elem_list_to_info(list); int i; for (i = 0; i < elem->channels; i++) @@ -447,8 +446,7 @@ static int scarlett_ctl_enum_put(struct snd_kcontrol *kctl, static int scarlett_ctl_enum_resume(struct usb_mixer_elem_list *list) { - struct usb_mixer_elem_info *elem = - container_of(list, struct usb_mixer_elem_info, head); + struct usb_mixer_elem_info *elem = mixer_elem_list_to_info(list); if (elem->cached) snd_usb_set_cur_mix_value(elem, 0, 0, *elem->cache_val); diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index c892b4d1e733f7b061f7a983fd11025209eb944e..ec56ce38206198aa2d56f931685ea6a24a8c6f6a 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -3323,4 +3323,56 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), } }, +/* + * MacroSilicon MS2109 based HDMI capture cards + * + * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch. + * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if + * they pretend to be 96kHz mono as a workaround for stereo being broken + * by that... + * + * They also have swapped L-R channels, but that's for userspace to deal + * with. + */ +{ + USB_DEVICE(0x534d, 0x2109), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "MacroSilicon", + .product_name = "MS2109", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = &(const struct snd_usb_audio_quirk[]) { + { + .ifnum = 2, + .type = QUIRK_AUDIO_ALIGN_TRANSFER, + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_MIXER, + }, + { + .ifnum = 3, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels = 2, + .iface = 3, + .altsetting = 1, + .altset_idx = 1, + .attributes = 0, + .endpoint = 0x82, + .ep_attr = USB_ENDPOINT_XFER_ISOC | + USB_ENDPOINT_SYNC_ASYNC, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 48000, + .rate_max = 48000, + } + }, + { + .ifnum = -1 + } + } + } +}, + #undef USB_DEVICE_VENDOR_SPEC diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 4872c27f6054398ad06360afef78b675257adbd3..cd36394e27ae60c56ac6e18f3a1acc35ef2414ff 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1164,6 +1164,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) static bool is_itf_usb_dsd_2alts_dac(unsigned int id) { switch (id) { + case USB_ID(0x154e, 0x1002): /* Denon DCD-1500RE */ case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */ case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */ case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */ @@ -1318,13 +1319,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) mdelay(20); - /* Zoom R16/24, Logitech H650e, Jabra 550a needs a tiny delay here, - * otherwise requests like get/set frequency return as failed despite - * actually succeeding. + /* Zoom R16/24, Logitech H650e, Jabra 550a, Kingston HyperX needs a tiny + * delay here, otherwise requests like get/set frequency return as + * failed despite actually succeeding. 
*/ if ((chip->usb_id == USB_ID(0x1686, 0x00dd) || chip->usb_id == USB_ID(0x046d, 0x0a46) || - chip->usb_id == USB_ID(0x0b0e, 0x0349)) && + chip->usb_id == USB_ID(0x0b0e, 0x0349) || + chip->usb_id == USB_ID(0x0951, 0x16ad)) && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) mdelay(1); } diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index 7c7c91e8df3f6e962454a246d77bc8e58bf69217..8ff1f2fa50865866e238aa3a0c771c6c1fb80dfa 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -37,7 +37,7 @@ struct snd_usb_audio { struct usb_interface *pm_intf; u32 usb_id; struct mutex mutex; - unsigned int autosuspended:1; + unsigned int system_suspend; atomic_t active; atomic_t shutdown; atomic_t usage_count; diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c index 45b50b89009aafd13a84986c5a6ff6ade86853cc..c61841051a90ab349a31649bdcf2b16877d5f6ce 100644 --- a/tools/lib/api/fs/fs.c +++ b/tools/lib/api/fs/fs.c @@ -90,6 +90,7 @@ struct fs { const char * const *mounts; char path[PATH_MAX]; bool found; + bool checked; long magic; }; @@ -111,31 +112,37 @@ static struct fs fs__entries[] = { .name = "sysfs", .mounts = sysfs__fs_known_mountpoints, .magic = SYSFS_MAGIC, + .checked = false, }, [FS__PROCFS] = { .name = "proc", .mounts = procfs__known_mountpoints, .magic = PROC_SUPER_MAGIC, + .checked = false, }, [FS__DEBUGFS] = { .name = "debugfs", .mounts = debugfs__known_mountpoints, .magic = DEBUGFS_MAGIC, + .checked = false, }, [FS__TRACEFS] = { .name = "tracefs", .mounts = tracefs__known_mountpoints, .magic = TRACEFS_MAGIC, + .checked = false, }, [FS__HUGETLBFS] = { .name = "hugetlbfs", .mounts = hugetlbfs__known_mountpoints, .magic = HUGETLBFS_MAGIC, + .checked = false, }, [FS__BPF_FS] = { .name = "bpf", .mounts = bpf_fs__known_mountpoints, .magic = BPF_FS_MAGIC, + .checked = false, }, }; @@ -158,6 +165,7 @@ static bool fs__read_mounts(struct fs *fs) } fclose(fp); + fs->checked = true; return fs->found = found; } @@ -220,6 +228,7 @@ static bool fs__env_override(struct fs *fs) return false; fs->found = true; + fs->checked = true; strncpy(fs->path, override_path, sizeof(fs->path) - 1); fs->path[sizeof(fs->path) - 1] = '\0'; return true; @@ -246,6 +255,14 @@ static const char *fs__mountpoint(int idx) if (fs->found) return (const char *)fs->path; + /* the mount point was already checked for the mount point + * but and did not exist, so return NULL to avoid scanning again. + * This makes the found and not found paths cost equivalent + * in case of multiple calls. + */ + if (fs->checked) + return NULL; + return fs__get_mountpoint(fs); } diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h index dda49deefb5212c9992faffaa5378bdd990c2546..57a3dc160b08677c63f22a43bb57b427eef01589 100644 --- a/tools/lib/api/fs/fs.h +++ b/tools/lib/api/fs/fs.h @@ -18,6 +18,18 @@ const char *name##__mount(void); \ bool name##__configured(void); \ +/* + * The xxxx__mountpoint() entry points find the first match mount point for each + * filesystems listed below, where xxxx is the filesystem type. + * + * The interface is as follows: + * + * - If a mount point is found on first call, it is cached and used for all + * subsequent calls. + * + * - If a mount point is not found, NULL is returned on first call and all + * subsequent calls. 
+ */ FS(sysfs) FS(procfs) FS(debugfs) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 04fc04b4ab67e03d798ff945a0dbe6d50b92daf0..247fbb5f6a389fabf4140c717eed5ba0fe5d18fd 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -778,6 +778,12 @@ static int add_special_section_alts(struct objtool_file *file) } if (special_alt->group) { + if (!special_alt->orig_len) { + WARN_FUNC("empty alternative entry", + orig_insn->sec, orig_insn->offset); + continue; + } + ret = handle_group_alt(file, special_alt, orig_insn, &new_insn); if (ret) @@ -1291,7 +1297,7 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s struct cfi_reg *cfa = &state->cfa; struct stack_op *op = &insn->stack_op; - if (cfa->base != CFI_SP) + if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT) return 0; /* push */ diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index e1ac51aaedcff4ee2441b7f9874fb53ece99c314..48d40b12d581fb752b15725b1468e226bc27bd93 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -377,6 +377,9 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs) for (k = 0; k < pev->ntevs; k++) { struct probe_trace_event *tev = &pev->tevs[k]; + /* Skipped events have no event name */ + if (!tev->event) + continue; /* We use tev's name for showing new events */ show_perf_probe_event(tev->group, tev->event, pev, diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 429c3e140dc32190bd82146ea7bbd78eb98ded8e..35a10b59854470ad56a73563ffd32829c24cbfac 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -401,8 +401,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report if (evname != NULL) ret += fprintf(fp, " of event '%s'", evname); - if (symbol_conf.show_ref_callgraph && - strstr(evname, "call-graph=no")) { + if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) { ret += fprintf(fp, ", show reference callgraph"); } diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index 505c13bf7e30d210c700b3c686a23cd5311ed56b..40b5f656ebc39cdbee35118d4e61cf5fd41ae55e 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -36,6 +36,7 @@ char dso__symtab_origin(const struct dso *dso) [DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D', [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f', [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u', + [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x', [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o', [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', @@ -118,6 +119,21 @@ int dso__read_binary_type_filename(const struct dso *dso, snprintf(filename + len, size - len, "%s", dso->long_name); break; + case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: + /* + * Ubuntu can mix up /usr/lib with /lib, putting debuginfo in + * /usr/lib/debug/lib when it is expected to be in + * /usr/lib/debug/usr/lib + */ + if (strlen(dso->long_name) < 9 || + strncmp(dso->long_name, "/usr/lib/", 9)) { + ret = -1; + break; + } + len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); + snprintf(filename + len, size - len, "%s", dso->long_name + 4); + break; + case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: { const char *last_slash; diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index 926ff2e7f668d18dc734c03bb3e1b8296b848dcd..ea198d19dadd33939e67044039db3d3d88593968 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h @@ -25,6 +25,7 @@ enum
dso_binary_type { DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO, DSO_BINARY_TYPE__FEDORA_DEBUGINFO, DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, DSO_BINARY_TYPE__BUILDID_DEBUGINFO, DSO_BINARY_TYPE__SYSTEM_PATH_DSO, DSO_BINARY_TYPE__GUEST_KMODULE, diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 6670e12a2bb3c741eee1e7b239b9e1817834c257..7c286756c34b2385f027b38b16607915ac7354b5 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -122,7 +122,7 @@ static struct symbol *__find_kernel_function(u64 addr, struct map **mapp) return machine__find_kernel_function(host_machine, addr, mapp); } -static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) +static struct ref_reloc_sym *kernel_get_ref_reloc_sym(struct map **pmap) { /* kmap->ref_reloc_sym should be set if host_machine is initialized */ struct kmap *kmap; @@ -134,6 +134,10 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) kmap = map__kmap(map); if (!kmap) return NULL; + + if (pmap) + *pmap = map; + return kmap->ref_reloc_sym; } @@ -145,7 +149,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr, struct map *map; /* ref_reloc_sym is just a label. Need a special fix*/ - reloc_sym = kernel_get_ref_reloc_sym(); + reloc_sym = kernel_get_ref_reloc_sym(NULL); if (reloc_sym && strcmp(name, reloc_sym->name) == 0) *addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr; else { @@ -764,6 +768,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, int ntevs) { struct ref_reloc_sym *reloc_sym; + struct map *map; char *tmp; int i, skipped = 0; @@ -772,7 +777,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, return post_process_offline_probe_trace_events(tevs, ntevs, symbol_conf.vmlinux_name); - reloc_sym = kernel_get_ref_reloc_sym(); + reloc_sym = kernel_get_ref_reloc_sym(&map); if (!reloc_sym) { pr_warning("Relocated base symbol is not found!\n"); return -EINVAL; @@ -783,9 +788,13 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, continue; if (tevs[i].point.retprobe && !kretprobe_offset_is_supported()) continue; - /* If we found a wrong one, mark it by NULL symbol */ + /* + * If we found a wrong one, mark it by NULL symbol. + * Since addresses in debuginfo are the same as in objdump, we need + * to convert them to addresses in memory.
+ */ if (kprobe_warn_out_range(tevs[i].point.symbol, - tevs[i].point.address)) { + map__objdump_2mem(map, tevs[i].point.address))) { tmp = NULL; skipped++; } else { @@ -1762,8 +1771,7 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev) fmt1_str = strtok_r(argv0_str, ":", &fmt); fmt2_str = strtok_r(NULL, "/", &fmt); fmt3_str = strtok_r(NULL, " \t", &fmt); - if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL - || fmt3_str == NULL) { + if (fmt1_str == NULL || fmt2_str == NULL || fmt3_str == NULL) { semantic_error("Failed to parse event name: %s\n", argv[0]); ret = -EINVAL; goto out; @@ -2888,7 +2896,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, /* Note that the symbols in the kmodule are not relocated */ if (!pev->uprobes && !pev->target && (!pp->retprobe || kretprobe_offset_is_supported())) { - reloc_sym = kernel_get_ref_reloc_sym(); + reloc_sym = kernel_get_ref_reloc_sym(NULL); if (!reloc_sym) { pr_warning("Relocated base symbol is not found!\n"); ret = -EINVAL; diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index ae0feea4e8b5f9bf9d6a5bb116f7312f436e8e1c..8f7f9d05f38c0f3c36be9fa91fd7769f2e3bdccc 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -114,6 +114,7 @@ enum dso_binary_type distro_dwarf_types[] = { DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, DSO_BINARY_TYPE__NOT_FOUND, }; diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index d028c2786802e22ad370ef81f5b00aa3ff42b519..821e5790eb0e688c5083a19baf926f5c59ca648c 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -350,8 +350,10 @@ int perf_stat_process_counter(struct perf_stat_config *config, * interval mode, otherwise overall avg running * averages will be shown for each interval. 
*/ - if (config->interval) - init_stats(ps->res_stats); + if (config->interval) { + for (i = 0; i < 3; i++) + init_stats(&ps->res_stats[i]); + } if (counter->per_pkg) zero_per_pkg(counter); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 27bffcb213eb735630cc2bdd04e5187cf53643f2..dea6f15af4859e9e48f254377781a33eba253713 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -64,6 +64,7 @@ static enum dso_binary_type binary_type_symtab[] = { DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP, DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, + DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, DSO_BINARY_TYPE__NOT_FOUND, }; @@ -1412,6 +1413,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: + case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: return !kmod && dso->kernel == DSO_TYPE_USER; diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c index dd4162fc0419bca7a7dccd17db8b99b61fb7f4a3..7a573fb4c1c4ec10aa560e8dd36e53a27ba7feb7 100644 --- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c +++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c @@ -114,6 +114,7 @@ static struct option long_options[] = { { "tcp", no_argument, 0, 't' }, { "udp", no_argument, 0, 'u' }, { "ip", no_argument, 0, 'i' }, + { NULL, 0, NULL, 0 }, }; static int next_port = 19999; diff --git a/tools/testing/selftests/networking/timestamping/timestamping.c b/tools/testing/selftests/networking/timestamping/timestamping.c index 5cdfd743447b72b465ffbff763a4981f3126a02f..900ed4b478996ea49e624df3fd269f116db2ea65 100644 --- a/tools/testing/selftests/networking/timestamping/timestamping.c +++ b/tools/testing/selftests/networking/timestamping/timestamping.c @@ -332,10 +332,16 @@ int main(int argc, char **argv) int val; socklen_t len; struct timeval next; + size_t if_len; if (argc < 2) usage(0); interface = argv[1]; + if_len = strlen(interface); + if (if_len >= IFNAMSIZ) { + printf("interface name exceeds IFNAMSIZ\n"); + exit(1); + } for (i = 2; i < argc; i++) { if (!strcasecmp(argv[i], "SO_TIMESTAMP")) @@ -369,12 +375,12 @@ int main(int argc, char **argv) bail("socket"); memset(&device, 0, sizeof(device)); - strncpy(device.ifr_name, interface, sizeof(device.ifr_name)); + memcpy(device.ifr_name, interface, if_len + 1); if (ioctl(sock, SIOCGIFADDR, &device) < 0) bail("getting interface IP address"); memset(&hwtstamp, 0, sizeof(hwtstamp)); - strncpy(hwtstamp.ifr_name, interface, sizeof(hwtstamp.ifr_name)); + memcpy(hwtstamp.ifr_name, interface, if_len + 1); hwtstamp.ifr_data = (void *)&hwconfig; memset(&hwconfig, 0, sizeof(hwconfig)); hwconfig.tx_type = diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c index 5d546dcdbc805b93516e73ea1dd0757fd1449aff..b8778960da1067d6a3764b3c561048b54f782e82 100644 --- a/tools/testing/selftests/x86/protection_keys.c +++ b/tools/testing/selftests/x86/protection_keys.c @@ -24,6 +24,7 @@ #define _GNU_SOURCE #include #include +#include #include #include #include @@ -612,10 +613,10 @@ int alloc_random_pkey(void) int nr_alloced = 0; int random_index; memset(alloced_pkeys, 0, sizeof(alloced_pkeys)); + srand((unsigned int)time(NULL)); /* allocate every possible key and 
make a note of which ones we got */ max_nr_pkey_allocs = NR_PKEYS; - max_nr_pkey_allocs = 1; for (i = 0; i < max_nr_pkey_allocs; i++) { int new_pkey = alloc_pkey(); if (new_pkey < 0) diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 63c6b630174fd7a1c6b5d23a3a1face000b26a76..d2003d149bdfcd2d38c2c8a3291cb90f64cbd7b1 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -260,7 +260,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid) { if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || - intid > VGIC_NR_PRIVATE_IRQS) + intid >= VGIC_NR_PRIVATE_IRQS) kvm_arm_halt_guest(vcpu->kvm); } @@ -268,7 +268,7 @@ static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid) static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid) { if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || - intid > VGIC_NR_PRIVATE_IRQS) + intid >= VGIC_NR_PRIVATE_IRQS) kvm_arm_resume_guest(vcpu->kvm); }