diff --git a/Documentation/ABI/stable/sysfs-bus-nvmem b/Documentation/ABI/stable/sysfs-bus-nvmem
index 5923ab4620c5d829e4a4f8f5fd338fd9b00d54ec..9ffba8576f7b690e334517a8f8fd647f2441b670 100644
--- a/Documentation/ABI/stable/sysfs-bus-nvmem
+++ b/Documentation/ABI/stable/sysfs-bus-nvmem
@@ -6,6 +6,8 @@ Description:
 		This file allows user to read/write the raw NVMEM contents.
 		Permissions for write to this file depends on the nvmem
 		provider configuration.
+		Note: This file is only present if CONFIG_NVMEM_SYSFS
+		is enabled
 
 		ex: hexdump /sys/bus/nvmem/devices/qfprom0/nvmem
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power
index 5e23e22dce1b514d133ac6371163da37fe5a7ed1..c1075ecfdb4b53dfdb17a75679114c1f9d94bf87 100644
--- a/Documentation/ABI/testing/sysfs-class-power
+++ b/Documentation/ABI/testing/sysfs-class-power
@@ -144,7 +144,8 @@ Description:
 		Access: Read
 		Valid values: "Unknown", "Good", "Overheat", "Dead",
 			      "Over voltage", "Unspecified failure", "Cold",
-			      "Watchdog timer expire", "Safety timer expire"
+			      "Watchdog timer expire", "Safety timer expire",
+			      "Over current", "Warm", "Cool", "Hot"
 
 What:		/sys/class/power_supply/<supply_name>/precharge_current
 Date:		June 2017
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index bd8a0d19abe67d6f0acaa891f805c2e21181d06b..427f5b45c67f132f32f70badec5e1bc92cde6323 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -323,3 +323,18 @@ What:		/sys/fs/f2fs/<disk>/mounted_time_sec
 Date:		February 2020
 Contact:	"Jaegeuk Kim"
 Description:	Show the mounted time in secs of this partition.
+
+What:		/sys/fs/f2fs/<disk>/data_io_flag
+Date:		April 2020
+Contact:	"Jaegeuk Kim"
+Description:	Give a way to attach REQ_META|FUA to data writes
+		given temperature-based bits. Now the bits indicate:
+		*      REQ_META      |      REQ_FUA      |
+		*    5 |   4  |  3   |   2 |   1  |  0   |
+		* Cold | Warm | Hot  | Cold | Warm | Hot |
+
+What:		/sys/fs/f2fs/<disk>/iostat_period_ms
+Date:		April 2020
+Contact:	"Daeho Jeong"
+Description:	Give a way to change iostat_period time. 3secs by default.
+		The new iostat trace gives stats gap given the period.
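As a worked example of the data_io_flag bit layout above (this illustration is
not part of the patch): the sketch below builds a mask that adds REQ_FUA to all
data temperatures and REQ_META to cold data only, then writes it to the sysfs
node.  The F2FS_DATA_* macro names and the "sda1" instance name are invented
for the example, and it assumes the attribute parses a plain decimal integer
like other f2fs sysfs knobs::

    /* Bits follow the table in the sysfs-fs-f2fs entry above:
     * 5..3 = REQ_META for cold/warm/hot data, 2..0 = REQ_FUA for the same.
     */
    #include <stdio.h>

    #define F2FS_DATA_META_COLD  (1 << 5)
    #define F2FS_DATA_META_WARM  (1 << 4)
    #define F2FS_DATA_META_HOT   (1 << 3)
    #define F2FS_DATA_FUA_COLD   (1 << 2)
    #define F2FS_DATA_FUA_WARM   (1 << 1)
    #define F2FS_DATA_FUA_HOT    (1 << 0)

    int main(void)
    {
            /* Example policy: FUA on every data write, REQ_META only for
             * cold data, i.e. 0b100111 == 39. */
            unsigned int flag = F2FS_DATA_FUA_COLD | F2FS_DATA_FUA_WARM |
                                F2FS_DATA_FUA_HOT | F2FS_DATA_META_COLD;
            FILE *f = fopen("/sys/fs/f2fs/sda1/data_io_flag", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fprintf(f, "%u\n", flag);
            return fclose(f) ? 1 : 0;
    }

Writing 0 to the same attribute clears all six bits again.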
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 3d71612fb9f9b745ac6c580bafd9b35469fdb412..febe6f08d5e79d05047021f431a81cd6187697b8 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4032,7 +4032,9 @@
 			[[,]s[mp]#### \
 			[[,]b[ios] | a[cpi] | k[bd] | t[riple] | e[fi] | p[ci]] \
 			[[,]f[orce]
-			Where reboot_mode is one of warm (soft) or cold (hard) or gpio,
+			Where reboot_mode is one of warm (soft) or cold (hard) or gpio
+			(prefix with 'panic_' to set mode for panic
+			reboot only),
 			reboot_type is one of bios, acpi, kbd, triple, efi, or pci,
 			reboot_force is either force or not specified,
 			reboot_cpu is s[mp]#### with #### being the processor
diff --git a/Documentation/arm64/index.rst b/Documentation/arm64/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5c0c69dc58aa3887e52453f3630afdb9ed8e2bd1
--- /dev/null
+++ b/Documentation/arm64/index.rst
@@ -0,0 +1,27 @@
+==================
+ARM64 Architecture
+==================
+
+.. toctree::
+   :maxdepth: 1
+
+   acpi_object_usage
+   arm-acpi
+   booting
+   cpu-feature-registers
+   elf_hwcaps
+   hugetlbpage
+   legacy_instructions
+   memory
+   pointer-authentication
+   silicon-errata
+   sve
+   tagged-address-abi
+   tagged-pointers
+
+.. only::  subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index eeb3fc9d777b810f0ff53a32bc9803896d6d3942..667ea906266ed0a54c0d2f514d4269ce905bb61f 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -59,6 +59,7 @@ stable kernels.
 | ARM            | Cortex-A73      | #858921         | ARM64_ERRATUM_858921        |
 | ARM            | Cortex-A55      | #1024718        | ARM64_ERRATUM_1024718       |
 | ARM            | Cortex-A76      | #1463225        | ARM64_ERRATUM_1463225       |
+| ARM            | Neoverse-N1     | #1542419        | ARM64_ERRATUM_1542419       |
 | ARM            | MMU-500         | #841119,#826419 | N/A                         |
 |                |                 |                 |                             |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375        |
diff --git a/Documentation/arm64/tagged-pointers.rst b/Documentation/arm64/tagged-pointers.rst
new file mode 100644
index 0000000000000000000000000000000000000000..eab4323609b97fd8dafa962404713aab8c8c04e9
--- /dev/null
+++ b/Documentation/arm64/tagged-pointers.rst
@@ -0,0 +1,75 @@
+=========================================
+Tagged virtual addresses in AArch64 Linux
+=========================================
+
+Author: Will Deacon
+
+Date  : 12 June 2013
+
+This document briefly describes the provision of tagged virtual
+addresses in the AArch64 translation system and their potential uses
+in AArch64 Linux.
+
+The kernel configures the translation tables so that translations made
+via TTBR0 (i.e. userspace mappings) have the top byte (bits 63:56) of
+the virtual address ignored by the translation hardware. This frees up
+this byte for application use.
+
+
+Passing tagged addresses to the kernel
+--------------------------------------
+
+All interpretation of userspace memory addresses by the kernel assumes
+an address tag of 0x00, unless the application enables the AArch64
+Tagged Address ABI explicitly
+(Documentation/arm64/tagged-address-abi.rst).
+
+This includes, but is not limited to, addresses found in:
+
+ - pointer arguments to system calls, including pointers in structures
+   passed to system calls,
+
+ - the stack pointer (sp), e.g. when interpreting it to deliver a
+   signal,
+
+ - the frame pointer (x29) and frame records, e.g. when interpreting
+   them to generate a backtrace or call graph.
+
+Using non-zero address tags in any of these locations when the
+userspace application did not enable the AArch64 Tagged Address ABI may
+result in an error code being returned, a (fatal) signal being raised,
+or other modes of failure.
+
+For these reasons, when the AArch64 Tagged Address ABI is disabled,
+passing non-zero address tags to the kernel via system calls is
+forbidden, and using a non-zero address tag for sp is strongly
+discouraged.
+
+Programs maintaining a frame pointer and frame records that use non-zero
+address tags may suffer impaired or inaccurate debug and profiling
+visibility.
+
+
+Preserving tags
+---------------
+
+Non-zero tags are not preserved when delivering signals. This means that
+signal handlers in applications making use of tags cannot rely on the
+tag information for user virtual addresses being maintained for fields
+inside siginfo_t. One exception to this rule is for signals raised in
+response to watchpoint debug exceptions, where the tag information will
+be preserved.
+
+The architecture prevents the use of a tagged PC, so the upper byte will
+be set to a sign-extension of bit 55 on exception return.
+
+This behaviour is maintained when the AArch64 Tagged Address ABI is
+enabled.
+
+
+Other considerations
+--------------------
+
+Special care should be taken when using tagged pointers, since it is
+likely that C compilers will not hazard two virtual addresses differing
+only in the upper byte.
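To make the two points above concrete (loads and stores ignore the tag, while
system calls only accept tagged pointers once the opt-in ABI from
Documentation/arm64/tagged-address-abi.rst is enabled), here is a minimal
userspace sketch.  It is not part of the patch; the 0x2a tag is arbitrary and
error handling is trimmed::

    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_TAGGED_ADDR_CTRL
    #define PR_SET_TAGGED_ADDR_CTRL 55
    #define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
    #endif

    /* Place "tag" in bits 63:56 of a userspace pointer. */
    static void *tag_pointer(void *p, unsigned char tag)
    {
            return (void *)(((unsigned long)p & ~(0xffUL << 56)) |
                            ((unsigned long)tag << 56));
    }

    int main(void)
    {
            char *buf = malloc(64);
            char *tagged = tag_pointer(buf, 0x2a);

            /* The MMU ignores the top byte: this stores into buf[0..5]. */
            strcpy(tagged, "hello");

            /* Only pass the tagged pointer to a syscall once the tagged
             * address ABI has been enabled for this task. */
            if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE,
                      0, 0, 0) == 0)
                    write(STDOUT_FILENO, tagged, 6);

            free(buf);      /* free() still gets the untagged pointer */
            return 0;
    }

If the prctl() fails (no tagged address ABI available), only the direct memory
accesses in the first half of the example are guaranteed to work.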
diff --git a/Documentation/sound/designs/compress-offload.rst b/Documentation/sound/designs/compress-offload.rst
index ad4bfbdacc83517e1196c1ef7f86b7381b6041e2..a29e4e632bf6b305861d7859de03b99c6da7da7e 100644
--- a/Documentation/sound/designs/compress-offload.rst
+++ b/Documentation/sound/designs/compress-offload.rst
@@ -183,6 +183,11 @@ partial drain
   EOF is reached and now DSP can start skipping padding delay. Also next
   write data would belong to next track
 
+- set_next_track_param
+This routine is called to send to DSP codec specific data of subsequent track
+in gapless before first write.
+
+
 Sequence flow for gapless would be:
 - Open
 - Get caps / codec caps
@@ -194,6 +199,7 @@ Sequence flow for gapless would be:
 - Indicate next track data by sending set_next_track
 - Set metadata of the next track
 - then call partial_drain to flush most of buffer in DSP
+- set codec specific data of subsequent track
 - Fill data of the next track
 - DSP switches to second track
 
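The gapless flow above reads naturally as code.  The sketch below mirrors it
using the tinycompress userspace library (compress_write(), compress_start(),
compress_next_track(), compress_set_gapless_metadata(),
compress_partial_drain(), compress_drain()); it assumes a stream that is
already open and configured for the first track, and the exact signatures may
differ between tinycompress versions, so treat it as an outline rather than a
reference implementation::

    #include <tinycompress/tinycompress.h>

    int play_gapless_pair(struct compress *c,
                          const void *trk1, unsigned int len1,
                          const void *trk2, unsigned int len2,
                          struct compr_gapless_mdata *mdata2)
    {
            /* Fill data of the first track, then trigger start. */
            if (compress_write(c, trk1, len1) < 0 || compress_start(c) < 0)
                    return -1;

            /* Indicate that following data belongs to a new track and set
             * its gapless metadata (encoder delay/padding). */
            if (compress_next_track(c) < 0 ||
                compress_set_gapless_metadata(c, mdata2) < 0)
                    return -1;

            /* Flush most of the first track's buffer out of the DSP. */
            if (compress_partial_drain(c) < 0)
                    return -1;

            /*
             * This is where the new step lands: codec specific data of the
             * subsequent track is sent to the DSP (the driver's
             * set_next_track_param routine) before the first write of that
             * track.  The userspace plumbing for it is driver specific and
             * not shown here.
             */

            /* Data written from now on belongs to the second track. */
            if (compress_write(c, trk2, len2) < 0)
                    return -1;

            return compress_drain(c);
    }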
diff --git a/Documentation/sound/hd-audio/index.rst b/Documentation/sound/hd-audio/index.rst
index f8a72ffffe66f49560a2f5cdb9ae61243824098f..6e12de9fc34e29eb168e33ac2ec53a3955ad9fc3 100644
--- a/Documentation/sound/hd-audio/index.rst
+++ b/Documentation/sound/hd-audio/index.rst
@@ -8,3 +8,4 @@ HD-Audio
    models
    controls
    dp-mst
+   realtek-pc-beep
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
index e06238131f77e027980df2bc710644c2545f8570..8c0de54b5649b7f6d6e804ea40e4b23f185c4121 100644
--- a/Documentation/sound/hd-audio/models.rst
+++ b/Documentation/sound/hd-audio/models.rst
@@ -216,8 +216,6 @@ alc298-dell-aio
     ALC298 fixups on Dell AIO machines
 alc275-dell-xps
     ALC275 fixups on Dell XPS models
-alc256-dell-xps13
-    ALC256 fixups on Dell XPS13
 lenovo-spk-noise
     Workaround for speaker noise on Lenovo machines
 lenovo-hotkey
diff --git a/Documentation/sound/hd-audio/realtek-pc-beep.rst b/Documentation/sound/hd-audio/realtek-pc-beep.rst
new file mode 100644
index 0000000000000000000000000000000000000000..be47c6f76a6e9fefed4a94c7b44bfea035d830ab
--- /dev/null
+++ b/Documentation/sound/hd-audio/realtek-pc-beep.rst
@@ -0,0 +1,129 @@
+===============================
+Realtek PC Beep Hidden Register
+===============================
+
+This file documents the "PC Beep Hidden Register", which is present in certain
+Realtek HDA codecs and controls a muxer and pair of passthrough mixers that
+can route audio between pins but aren't themselves exposed as HDA widgets. As
+far as I can tell, these hidden routes are designed to allow flexible PC Beep
+output for codecs that don't have mixer widgets in their output paths. Why
+it's easier to hide a mixer behind an undocumented vendor register than to
+just expose it as a widget, I have no idea.
+
+Register Description
+====================
+
+The register is accessed via processing coefficient 0x36 on NID 20h. Bits not
+identified below have no discernible effect on my machine, a Dell XPS 13
+9350::
+
+  MSB                          LSB
+  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  | |h|S|L|         | B |R|       | Known bits
+  +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+  |0|0|1|1|  0x7  |0|0x0|1|  0x7  | Reset value
+  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+1Ah input select (B): 2 bits
+  When zero, expose the PC Beep line (from the internal beep generator, when
+  enabled with the Set Beep Generation verb on NID 01h, or else from the
+  external PCBEEP pin) on the 1Ah pin node. When nonzero, expose the headphone
+  jack (or possibly Line In on some machines) input instead. If PC Beep is
+  selected, the 1Ah boost control has no effect.
+
+Amplify 1Ah loopback, left (L): 1 bit
+  Amplify the left channel of 1Ah before mixing it into outputs as specified
+  by h and S bits. Does not affect the level of 1Ah exposed to other widgets.
+
+Amplify 1Ah loopback, right (R): 1 bit
+  Amplify the right channel of 1Ah before mixing it into outputs as specified
+  by h and S bits. Does not affect the level of 1Ah exposed to other widgets.
+
+Loopback 1Ah to 21h [active low] (h): 1 bit
+  When zero, mix 1Ah (possibly with amplification, depending on L and R bits)
+  into 21h (headphone jack on my machine). Mixed signal respects the mute
+  setting on 21h.
+
+Loopback 1Ah to 14h (S): 1 bit
+  When one, mix 1Ah (possibly with amplification, depending on L and R bits)
+  into 14h (internal speaker on my machine). Mixed signal **ignores** the mute
+  setting on 14h and is present whenever 14h is configured as an output.
+
+Path diagrams
+=============
+
+1Ah input selection (DIV is the PC Beep divider set on NID 01h)::
+
+  <Beep generator>   <PCBEEP pin>    <Headphone jack>
+          |                |                |
+          +--DIV--+--!DIV--+       {1Ah boost control}
+                  |                        |
+                  +--(b == 0)--+--(b != 0)-+
+                               |
+               >1Ah (Beep/Headphone Mic/Line In)<
+
+Loopback of 1Ah to 21h/14h::
+
+  <1Ah (Beep/Headphone Mic/Line In)>
+                  |
+          {amplify if L/R}
+                  |
+          +-----!h-----+-----S-----+
+          |                        |
+  {21h mute control}               |
+          |                        |
+  >21h (Headphone)<   >14h (Internal Speaker)<
+
+Background
+==========
+
+All Realtek HDA codecs have a vendor-defined widget with node ID 20h which
+provides access to a bank of registers that control various codec functions.
+Registers are read and written via the standard HDA processing coefficient
+verbs (Set/Get Coefficient Index, Set/Get Processing Coefficient). The node is
+named "Realtek Vendor Registers" in public datasheets' verb listings and,
+apart from that, is entirely undocumented.
+
+This particular register, exposed at coefficient 0x36 and named in commits
+from Realtek, is of note: unlike most registers, which seem to control
+detailed amplifier parameters not in scope of the HDA specification, it
+controls audio routing which could just as easily have been defined using
+standard HDA mixer and selector widgets.
+
+Specifically, it selects between two sources for the input pin widget with
+Node ID (NID) 1Ah: the widget's signal can come either from an audio jack (on
+my laptop, a Dell XPS 13 9350, it's the headphone jack, but comments in
+Realtek commits indicate that it might be a Line In on some machines) or from
+the PC Beep line (which is itself multiplexed between the codec's internal
+beep generator and external PCBEEP pin, depending on if the beep generator is
+enabled via verbs on NID 01h). Additionally, it can mix (with optional
+amplification) that signal onto the 21h and/or 14h output pins.
+
+The register's reset value is 0x3717, corresponding to PC Beep on 1Ah that is
+then amplified and mixed into both the headphones and the speakers. Not only
+does this violate the HDA specification, which says that "[a vendor defined
+beep input pin] connection may be maintained *only* while the Link reset
+(**RST#**) is asserted", it means that we cannot ignore the register if we
+care about the input that 1Ah would otherwise expose or if the PCBEEP trace is
+poorly shielded and picks up chassis noise (both of which are the case on my
+machine).
+
+Unfortunately, there are lots of ways to get this register configuration
+wrong. Linux, it seems, has gone through most of them. For one, the register
+resets after S3 suspend: judging by existing code, this isn't the case for all
+vendor registers, and it's led to some fixes that improve behavior on cold
+boot but don't last after suspend. Other fixes have successfully switched the
+1Ah input away from PC Beep but have failed to disable both loopback paths. On
+my machine, this means that the headphone input is amplified and looped back
+to the headphone output, which uses the exact same pins! As you might expect,
+this causes terrible headphone noise, the character of which is controlled by
+the 1Ah boost control. (If you've seen instructions online to fix XPS 13
+headphone noise by changing "Headphone Mic Boost" in ALSA, now you know why.)
+
+The information here has been obtained through black-box reverse engineering
+of the ALC256 codec's behavior and is not guaranteed to be correct. It likely
+also applies for the ALC255, ALC257, ALC235, and ALC236, since those codecs
+seem to be close relatives of the ALC256. (They all share one initialization
+function.) Additionally, other codecs like the ALC225 and ALC285 also have
+this register, judging by existing fixups in ``patch_realtek.c``, but specific
+data (e.g. node IDs, bit positions, pin mappings) for those codecs may differ
+from what I've described here.
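Tying the sections above together, here is a kernel-side sketch of how a fixup
might poke this register with the standard processing-coefficient verbs
mentioned in the Background section.  It is an illustration added alongside
the patch, not part of it: the helper names and field macros are invented
(patch_realtek.c has its own alc_read_coef_idx()/alc_write_coef_idx()
helpers), the B value of 0x2 is just one arbitrary nonzero choice, and the bit
positions follow the Register Description table::

    #include <sound/core.h>
    #include "hda_codec.h"  /* as included by sound/pci/hda/patch_realtek.c */

    #define PCBEEP_COEF_IDX         0x36        /* hidden register, NID 0x20 */
    #define PCBEEP_NO_LOOPBACK_21H  (1 << 14)   /* "h" bit, active low       */
    #define PCBEEP_LOOPBACK_14H     (1 << 13)   /* "S" bit                   */
    #define PCBEEP_INPUT_MASK       (0x3 << 5)  /* "B" field                 */
    #define PCBEEP_INPUT_JACK       (0x2 << 5)  /* any nonzero B = jack      */

    static unsigned int pcbeep_coef_read(struct hda_codec *codec)
    {
            snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX,
                                PCBEEP_COEF_IDX);
            return snd_hda_codec_read(codec, 0x20, 0,
                                      AC_VERB_GET_PROC_COEF, 0);
    }

    static void pcbeep_coef_write(struct hda_codec *codec, unsigned int val)
    {
            snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX,
                                PCBEEP_COEF_IDX);
            snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, val);
    }

    /* Route the jack to 1Ah and keep 1Ah out of both output pins. */
    static void pcbeep_disable_loopback(struct hda_codec *codec)
    {
            unsigned int val = pcbeep_coef_read(codec);

            val |= PCBEEP_NO_LOOPBACK_21H;  /* h = 1: no mix into 21h */
            val &= ~PCBEEP_LOOPBACK_14H;    /* S = 0: no mix into 14h */
            val = (val & ~PCBEEP_INPUT_MASK) | PCBEEP_INPUT_JACK;
            pcbeep_coef_write(codec, val);
    }

Because the register resets across S3 suspend (see above), a real fixup would
also have to reapply a write like this from its resume path.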
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index e33b6808db6b323aa4a106da4559f27fc778029c..a48baf202265895e7409df90ae773875e494d2c7 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -64,7 +64,6 @@ Currently, these files are in /proc/sys/vm:
 - swappiness
 - user_reserve_kbytes
 - vfs_cache_pressure
-- watermark_boost_factor
 - watermark_scale_factor
 - zone_reclaim_mode
 
@@ -873,26 +872,6 @@ ten times more freeable objects than there are.
 
 =============================================================
 
-watermark_boost_factor:
-
-This factor controls the level of reclaim when memory is being fragmented.
-It defines the percentage of the high watermark of a zone that will be
-reclaimed if pages of different mobility are being mixed within pageblocks.
-The intent is that compaction has less work to do in the future and to
-increase the success rate of future high-order allocations such as SLUB
-allocations, THP and hugetlbfs pages.
-
-To make it sensible with respect to the watermark_scale_factor parameter,
-the unit is in fractions of 10,000. The default value of 15,000 means
-that up to 150% of the high watermark will be reclaimed in the event of
-a pageblock being mixed due to fragmentation. The level of reclaim is
-determined by the number of fragmentation events that occurred in the
-recent past. If this value is smaller than a pageblock then a pageblocks
-worth of pages will be reclaimed (e.g. 2MB on 64-bit x86). A boost factor
-of 0 will disable the feature.
-
-=============================================================
-
 watermark_scale_factor:
 
 This factor controls the aggressiveness of kswapd. It defines the
diff --git a/Makefile b/Makefile
index 64db14e7ceeafb2c4276a2dc6bf31e69aca42829..50bafb2faec3fd0a6ed7e99049f1068f1a3556f9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 114
+SUBLEVEL = 123
 EXTRAVERSION =
 NAME = "People's Front"
 
diff --git a/abi_gki_aarch64.xml b/abi_gki_aarch64.xml
index 781feb79eca7cb1b0df24b85a857a117bb9d4aad..b28e81d0c00dcb250c9106a904eb246d1abdd222 100644
--- a/abi_gki_aarch64.xml
+++ b/abi_gki_aarch64.xml
[The hunks for abi_gki_aarch64.xml (the GKI ABI symbol list) are omitted here:
only bare +/- diff markers survived extraction and the XML entries themselves
are not recoverable.]
- - - - - - - - - - + - - - - - - - - - - - - - + + + + + - - - - - + + + - - - - - - - + + + - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + - + - - - + + + - - - - + + + + - - + + - - + + - - + + - + - - + + - - + + - - - - + + + + - - + + - - + + - - - + + + - - + + - - - + + + - - - + + + - - + + - - - + + + - - - + + + - - - + + + - - + + - - - - + + + + - - - - + + + + - - - + + + - - + + - - + + - - - + + + - - - + + + - - + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - - + + + + - - - + + + - - - + + + - - - + + + - + - + - + - + - - - - - - - - - - - - + + + + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - + + + + + + + - - + + - + - + - - - + + + - - + + - - + + - - + + - - + + @@ -24828,950 +26133,982 @@ - - + + - - + + - - - - - + + + + + - - - - - + + + + + - + - + - - - + + + - - - + + + - - - + + + - - - + + + - - + + - + - - - - + + + + - + - + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - + - - + + - - + + - + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + + - + - + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - + + + + + - - + + - - - + + + - - + + - - + + - - - + + + - - - - + + + + - + + - + - - - + + + - - - + + + - - - - - + + + + + - - - - - + + + + + - - - + + + - - - - + + + + - - + + - - - + + + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -25779,26 +27116,26 @@ - + - + - + - + - - + + - - + + @@ -25806,8 +27143,8 @@ - - + + @@ -25817,876 +27154,891 @@ - - - - - - + + + + + - - + + - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + - - - - + + + + - - - - - - + + + + + + - - - + + + - - + + - - + + - - - + + + - - + + - - + + - - + + - - + + - + - - - + + + - + - - - + + + - - + + - - + + - - - - + + + + - - + + - - - - + + + + - - - - + + + + - + - - + + - - - - + + + + - + - + - + - + - - - - + + + + - - - + + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - 
+ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - + + - + - - - + + + - - + + - - - - + + + + - - - + + + - - - + + + - + - - + + - + - - - - - - - - - - - - - - - - - - - - - + + + + - - - - + + + + - - + + + + + + + + + + + + + + + + + + + - + - - - - + + + + - - - - - + + + + + + + + + + - - + + - - - + + + - - - - + + + + - - - + + + - - - - + + + + - - - + + + - - - + + + - - - - + + + + - + - + - + - + - + - + - - + + - - - + + + - - + + - + - - - + + + - - + + - + - - - - + + + + - - - + + + - - - - + + + + - - - - - + + + + + - - - + + + - - + + - + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - + - + - + - + + + + - + - + - + - + + + + - + - + - + - + - + - - - - - - - - - + + + + + + + + + - - + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + @@ -26695,124 +28047,124 @@ - - - - - + + + + + - - - - - - - - - + + + + + + + + + - + - - - + + + - + - - + + - + - + - + - + - - - + + + - - - - - - - - + + + + + + + + - - - - + + + + - - - + + + - - + + - + - + - + - + - + - - - - - + + + + + - - - - - + + + + + @@ -26820,934 +28172,929 @@ - - - - + + + + - - + + - - + + - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - - - + + + - - + + - - - + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - + - - + + - + - - + + - - - + + + - + - + - + - + - + - + - + - + - - - - - - - - - - + + + + + - - - - + + + + - - - - + + + + - - - - - + + + + - - - + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + - - + + - - + + - - + + - - + + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - - + + + - - + + - - + + - - - + + + - - - - + + + + - - - - - + + + + + - + - + - - + + - - + + - - + + - - - - - + + + + + - + - - - - + + + + - + - + - + - + - - + + - - + + - - - - + + + + - - - - - - - - - + + + + + + + + + - + - - - - - - + + + + + + - + - - - - - + + + + + - + - - - - + + + + - + - - - - + + + + - - + + - - + + - + - + - + - + - + - - - + + + - - + + - + - + - + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - + - + - + - + - + - + - - - + + + - - - + + + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -27772,8 +29119,8 @@ - - + + @@ -27785,8 +29132,8 @@ - - + + @@ -28001,113 +29348,115 @@ - - - - - - - - + + + + + + + + + + - - + + - + - + - + - + - + - - - - - - - - - - - - - - - - + + + 
+ + + + + + + + + + + + + - + - - - + + + - - - - - + + + + + - - - - - - - - - + + + + + + + + + - - - - - + + + + + - - - + + + - - - - + + + + - - - - - + + + + + - - - - - + + + + + - - + + - + - + @@ -28117,296 +29466,350 @@ - + - + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - - - - - + + + + + - - - - - + + + + + - - - + + + - - - - - - + + + + + + - - + + - - - + + + - - + + - - - + + + - - - + + + - - + + - - + + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - + + + + + + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + - - + + - - + + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -28442,54 +29845,54 @@ - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - - + + @@ -28497,93 +29900,93 @@ - - + + - + - + - + - + - + - + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -28591,699 +29994,699 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + + + + - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - + + + + - - - - + + + + - + - - - - - - - + + + + + + + - - + + - + - + - + - - - + + + - - - + + + - + - - - + + + - - + + - - - + + + - - + + - - + + - - - - + + + + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - + - + - + - - + + - - + + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + + + + - - + + - - + + - - + + - - + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -29303,1833 +30706,1647 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - + - + - - + + - - + + - - + + - - + + - - + + - + - + + + + + + + + + + - - + + - + - + - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - + + - - + + - - + + - - - - + + + + - + - + - + - - - + + + - - - + + + - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + - - + + - - + + - - - 
+ + + - - + + - - - - + + + + - - - - - + + + + + - - + + - - - + + + - + - + - - - - - - - - - - - + + + + + + + + + + + - - - - - - + + + + + + - + - - - - + + + + - + - - + + - + - + - - + + - - + + - + - + - - - - + + + + - - - - + + + + - - + + - - + + - - + + - - + + - - - - - + + + + + - - + + - - - + + + - - - - - - - + + + + + + + - + - + - - - - - + + + + + - - + + - + - + - - - + + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - - + + + - + - + - - - - + + + + - - - + + + - + - - - + + + - - - - - + + + + + - + - - - + + + - - + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + - + + - + - + - + - + - + - + - + - + + + + + + + - + - + - + - + + + + + + + - + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - + - + - - - - + - - - - - - - - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + - - + + - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - + + + + - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + + - - - - - - + + + - - + + + + - - - - - - - + + + + + + + - - + + - - - - - - - - - - - + + + - - - - - - + + + - - - + + - - - + + + - - - + + + - - - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + - - - + + + - - - - + + + + - - - - - - - - - - - + + + - - - - + + - - - + + + + - - + + + - - - - + + + + + + + - - - - + + + + + + - - - - - - - + + + + + - - - - - - + + + + + + - - - - - + + + + + - - - - - + + + - - - - - + + + + + - - + + - - - - - + + + + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + + + + + + + + + + + + + - + @@ -31149,324 +32366,336 @@ - + - + - + - + - + - + - + - + - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + - + - - - + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - - + + + + + - - - + + + - - - - + + + + - - - + + + - - + + - - + + - - - - - + + + + + - + - + - + - + - + - - - - + + + + - - - - - + + + + + - + - - - - - - + + + + + + - - + + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - - - - + - + + + + - + + + + + + + + + + - + + + + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -31476,593 +32705,593 @@ - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - + - + - - + + - - + + + + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - + + + + - + - + - + - + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + - - + + - - - + + + - - - + + + - - - + + + - - + + - - + + - - - - + + + + - - - + + + - - + + - + @@ -32072,1397 +33301,972 @@ - - - + + + - - + + - + - + - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + - + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + - - - + + + + + - - - + + + + + - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - - + + - - - + + + - - - + + + - - - + + + - - - + + + - - + + - - - + + + - - - + + + - - - - + + + + - - + + - - - + + + - + - + - - - - - + + + + + - - - + + + - - - - - - - - - - + + + + + + + + + + - + - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + - - - + + + - - + + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - - - - - - - - + + + + + + + + - - + + - + - - - + + + - - - - + + + + - - - - + + + + - - - + + + - - + + - - + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - + - - - - - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + - - - - + + + + - - + + - - - + + + - - + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + - - + + - + - + - + - + - + - - - - - - - - - - - - - - + + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + - - - + + + + - - - + + + + + + + - - - - + + + + - - - - + + + + - - - 
- + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - + + + - - + + - + - + - + - - - + + + - + - + + + + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + - + - + - - - + + + - + - - - + + + - - - - - - + + + + + + - - - - + + + + - - - + + + - - - - - + + + + + - - + + - - + + - + - + - - - - + + + + - + - + - + - + - + - + @@ -33471,753 +34275,758 @@ - + - + - + - + - + - + - + - + - - + + - - - - - + + + + + - - + + - + - - - + + + - - - - + + + + - - - + + + - + - + - - - - - - - + + + + + + + - + - + - + - + + - - + + - + - + - - + + - - + + - + - - + + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - - + + + + + + - - + + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -34225,357 +35034,357 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -34592,19 +35401,19 @@ - + - + - + - + - + @@ -34618,7 +35427,7 @@ - + @@ -34632,10 +35441,10 @@ - + - + @@ -34661,10 +35470,10 @@ - + - + @@ -34681,16 +35490,16 @@ - + - + - + @@ -34698,13 +35507,13 @@ - + - + - + @@ -34718,13 +35527,13 @@ - + - + - + @@ -34735,240 +35544,243 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + + + + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -34977,380 +35789,380 @@ - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - 
+ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -35362,64 +36174,64 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -35448,7 +36260,7 @@ - + @@ -35457,7 +36269,7 @@ - + @@ -35465,321 +36277,321 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -35795,1809 +36607,1792 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - + + + + + + + + + + + + + + + - + - - - - - - - + + + + + + + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + - - + + - - - - - - + + + + + + - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - + + - + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + - - - - + + + + - - - - + + + - - - - + + - - - - + + + - - - - + 
+ + - - - - + + + - - - - + + + - - - - + + + - - - - + + + - - - - + + + - - + + + + + - - - - + + + - - - + + + + - - + + + - - - + + + + - - - + + + + - - - + + + + + - - - + + + - - - + + + - - - + + + + + - - - + + + + - - - - - + + + - - - + + + - - - - + + + - - - + + + - - - - + + + - - - - + + + + - - - - - + + + - - - + + + - - - + + + - - - - - + + + - - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - - + + + - - - + + + - - - + + + - - - + + + + - - - + + + + - - - + + + + - - - + + + + - - - + + + + - - - + + + - - - + + + - - - + + + + - - - + + + + + + - - - + + + + - - - - - - - - - - - - - - - + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - - + + + + - - + + - - + + - - - - + + + + - - + + - + - - - + + + - + - - - + + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - + + + + + + + + + - - - + + + - - + + - - + + - - + + - - - - - + + + + + - - - - + + + + - - - + + + - - - + + + - - + + - - + + - - + + - + - - + + - + - + - - + + - + - - - - + + + + + - - + + - - + + - + - - - - - - - - - - - - - - - - - - - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -37609,76 +38404,76 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -37687,11 +38482,11 @@ - + - - + + @@ -37706,214 +38501,214 @@ - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -37923,818 +38718,869 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - - - + + + + - - - - - - - - - - - + + + + + + + + + + + - - - - - + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - - + + - + + - - + - - - - - + + + + + - - - - - - + + + + + + - - + + - - - - + + + + - - - + + + - - + + - - - + + + - - + + - - - - - - + + + + + + - - - - - - + + + + + + - - - - - - - + + + + + + + - + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + + - - + + - - - + + + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - + - - + + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + - + - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + - - - - + + + + - - - - + + + + - - - + + + - - - - + + + + - - - - + + + + - - - - - + + + + + - - - + + + - - + + - - - + + + - - - + + + - - + + - - - - + + + + - - + + - - + + @@ -38742,67 +39588,67 @@ - - + + - + - + - + - + - + - + - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + - + - - + + - - + + - - - + + + - - + + - - + + @@ -38811,11633 +39657,12733 @@ - - - - - - - - + + + + + + + + - - - + + + - - + + - - - - - - - + + + + + + + + + + + + + + + - - - - - - - + + + + + + - - - - - - + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + - - - + + + + + - - - - + + + + + + - - - - + + + - - + + + - - - + + + + + + + + + + + + + + + + - - - - + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - + + + + - - - - + + + + - - + + - - + + - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - + + + + + + - - + + - - + + + + + + + + + + + + + + - - - - - - - - - - - - + + - - + + + + + + + + + + - - + + - - + + - + - - + + + + + + + + + + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + - + - + - + - + - - - - - - - + - + - - - - + - - - - - + + - - + + - - + + - - + + - - + + - - + + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - + + - - + + - - + + - - + + - - - - + - + - - - - + - - - - + - + + + + + + + + + + + + + - + - + - + - + - + - - - - + - - - - - + + - - + + - - + + - + - - - - + - - - - + - + + + + - + + + + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - - + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - + - + - + - + - + - + - + - + - + - + + + + + + + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - - - + + - - + + - - + + - - + + - - + + - - + + - - + 
+ - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + - + - + - + - - + + - - + + + + + + + + - + - + - + - + - - - - - - - - - + - + - - - - + - - + + - - - - + - + - - - - - + + - - + + - - - - + - + - - - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - + + - - + + - - + + - - + + - - - - + + - - + + - - + + - - + + - - + + - - - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + - - + + + + + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + - + - + - - - - + - + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - + + - - + + - - + + - - - - - - - + - + - + + + + - + - + - - - - + - + - + + + + - + - + - - + + - - + + - - + + - - + + + + - - + + - - + + - - + + + + - - - - - - - + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + + + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - - + + + + - - + + - - - - - - - - + + - + - - - - + - - - - + + - + + + + - + - + + + + - + - - + + + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + - - + + - + - + - + - + + + + - + - + - + - + - - - - - - - - - - + - - - - + - + - - + + + + - - + + + + - - + + - - + + - - + + - - + + - - - - + - + - - - - - - - - - - + - - + + - - - - + + - - + + - - - - + + - + - + - - - - - - - + - + - - - - + - + + - - - - - - - + - - + + - - - - + + - - - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - + - - + + - + - + - + - - - - - + + + + + - + - + - - + + - - + + - - + + - - + + + + + - + - + - - + + - - + + - - + + - + - + - + - + - - - - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - + - - - - + + - - + + - + - + - + - - - - - - - - - - - - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + - + - + + + + - + - + - - + + + + + + + + + + - + - + - + + + + + + + + + + + + + - + + + + - + - + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + + + - - + + - + + - + + + + - + - - - - + - + - + - + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - - - - + - - + + - + - + - - - - + - - + + - + - + - + - - - - - - - - - - - - - - - - - - - - - - + - - + + - - + + - - + + + + - - + + - - + + + + + - - + + - - + + - - + + + + - - + + - - + + - - + + - + - + - - - - + - - - - + - - - - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - + + - + - - + + + + + - + - + - - - - - - - - - - - - + - - - - + - - + + - + - + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + + + + - + - + - + - + - - - - + - + - - - - + + - - + + - - + + - - + + - - + + - - - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + - - - - + - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - - + + + + + + + + + + + + + + - + + + + - + + + + - + - + - - - - + + - - + + - - - - + + - - + + + + + + + + - + - + - + - + - + + + + - + - - + + - - + + + + + - + - - - - + - - + + + + - - + + - - + + - - + + - - + + + + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + + + - - + + - + - + - - + + - - + + + + + + + + - + - + - - - - + - + - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + - - - - - - - - + + - - - - + + - - + + - + - + - - - - + + - - + + - - + + - - + + - - - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - + + - + - + - - + + - - + + - - + + + + + - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - - + + - - + + - - + + - - + + - - - - + + - - + + - + - + - - + + - + - + - - - - - - - - - - - - - - - - - - - - - + - - + + - - + + - + - + - - + + - - + + - - + + - + - + - - - - + - + - - + + - - + + - - - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - + + - - + + - - + + - - + + - - + + - - + + - - - - + + - - + + - - - - + + - - + + - - + - - - - + - - + + - + - - - - + - - + + - - + + - + - + - - - - + - - - - + + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - - - + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - + + + + + + + + + + + + + + + + - + - + + + + + + + + + + + + + - + - + - + + + + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - - - - + + + - - + + - - + + + + - - + + - + + + + - + + + + - + - + - + - - - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + - - - - + - + + + + - + + + + - + - - - - + + - - + + - - - - + + - - - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + + + + + + + + + + + + + + - + - - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + - - + + - - - - + + + + - - + + - - + + + + - - + + - - - - + + - - + + - - - - + + - - + + - - - - + + - + + + + + + + + + + + + + + + + + + + + + + - + - - + + + + + + + - + - + - + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + - + + + + - + - + - + - + + + + + + + - + - - - - + - - + + + + - - + + - - + + + + - - + + - + - + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - + - - - - + - + - + - + + + + + + + + + + + + + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + - - - - + - + - + - + - + - + - + - - - - + - + + + + + + + + + + + + + + + + + + + - + - + - + + + + + + + + + + + + + + + + + + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + - + - + - - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - + - - + + + + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + + + + - + - + - + + + + - + + + + + + + + + + + + + + + + - + - + - - + + + + + + + + + + + + + + - - + + - - + + + + + + + - + - + + + + - + + + + + + + - - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + + + + - + + + + - + - - - - + - + - + + + + - + + + + + + + + + + + + + + + + + + + - + - + - - + + - - + + + + + + + + + + + + + + - + - - + + - - + + - - + + - - + + - - + + - - + + + + + - + - + - + + + + + + + + + + - + + + + + + + + + + - + - + - - - - + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - + - + - + - - - - + - - - - + - - - - + - + - + - + - + + + + + + + + + + - + - - - - - - + + + - - + + - - + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + - + - + - + - + + + + - + - + - + - - - - - - - + + - - - - - - - - - - - - - - - - - - - - - - + - + - + + + + - + + + + - + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - - - - + - + - - + + + + + + + + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + + + - - + + - - + + + + - - + + - - + + - - + + - - + + + + - - + + - - + + + + + + + + + + + + + + + + + + - - + + - - - - - - + + + + + + + + + + + + + + + + + - + - + - + + + + - + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - - + + - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + + - - - - + + + - - + + + + + - + - - - - - - - - - + + - - - - - - - - - - - - - + + - - - - - + + + + - - - - - + + + + + - - - - - - - + + - - - - - - - - - - - - - - - - - - - + + + - - - - - - + + + + - - - - - + + + + - - - + + + + + + + + + + + + + + - - - - + + + - - - - - + + + - - - - + + + - - - + + - - - - + + + - - - - - - + + + - - - - - - + + + - - - - - - - - - - - - - - - - - - - + + + - - + + + + + + + + + - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - - + + + + - - + + - - + + + + - - - - + + - - + + - - + + - - + + + + - - - - + + - - + + + + - - + + - - + 
+ + + - - + + - - + + + + - - + + - - + + + + - - + + - - + + + + + + + + + + - - + + - - + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + - + - + - + - + - + - + - + + + + - + - + - + - - + + + + + + + + + + + + + - + - + - - + + - - + + - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + - - - - - + + + - - - + + + + - - - - + + + - - - - + + + + + - - - - - - - - - + + + + + + + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - - - - - - - - - - - - - - - - - - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - + + - - + + + + + + + + + + + + + + + + + - + - + - - - - + - - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - + + + + - + - + - - + + + + - - + + - + + - + - - + + - - + + - - + + - - + + - - + + - - - - + + - - + + - - + + + + + + + + + + + + + + + + + + + + + + + - + - + - - - - - + + - + - - + + + + + + + + + + + + + + + + + + + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + + - + - + - - - - - - - - - - + - - - - - - - - - - + - + + + + - + - + - - - - + - + + + + + + + + - + - + - + + + + - + + + + - + - - + + - - - - + + - - + + - - + + - - + + - - + + - - - - - - - - - + + + + - + - - - - - - - - - - - - - - + - - + + - - + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + - + - + - + - - - - - - - + - + - + - - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + - + - + - + - + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - - + + - - + + - - + + + + - - + + - - + + - - - - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + + + + + + + + - + + + + + + + - - - - + - - - - + - + - + - + - - - - - - - - - - - - - - - - + - - - - - - - - + + + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + - - - - - + + + + + - - - - - - + + + + - - - - - - - + + + + - - + + - - - - - - - - - - - - - - - + + + + + + + + + + + - - - - - - - - - + + + + + + + - - - - + + + + + - - - + + + + + - - - - + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + - - - - - + + + + + + + + + + + + + + + + + + + - - - - + + + + + + - - - - 
+ + + + + + + + + - - - - - - - - + + + + - - - - + + + + + - - - - - - + + + + - - - - + + + + + + + + - - - - - - + + + + + - - - - + + + + + - + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + + + - - + + + + + + + + + + + + + + + + + + + + + + - - - + + + + - - + + - - - - - + + + + + - - - - - + + + - - - + + + - - + - - - - - - + + - - - + + + + + + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + - + - + - + - + - + - + - + - + - + - + - + - - + + + + - - + + - - + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - - - - + - + - + - - + + + + + - + - - - - + + - - + + + + + - - - - + + + + + + + + - + + + + + + + + + + - - - - - - - - - - + - - + + - - + + - - - - - - - - - + + - - + + - - + + - - - - + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - + - - - - + - - + + - - + + - - - - + - - + + - - + + - - + + - + + + + + + + + + + + + + + + + + + + + + - + - + - + - + - + - + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + - - - + + + - - - + + + - - - - - + + + - - - - + + + + + - - - + + + + - - - - + + + - - - - + + + - - - - + + + + + - - - - + + + + + - - - - + + + - - - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - - - - + + + + - - - - + + + + - - - + + + + - - - - - + + + + + - - - - + + + + + + + + + + + + + + + + + + + + - - - - - + + + + + - - - + + + + - - + + + + + + - - - - + + + + + - - - - - - - - + + + + + - + - - - - + - - - - + - - - - - - - - - - - - - - - - - - - - - - + - - - - - - - - - - - - - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + - + - - - - + - + - - - - + - - - - - - - + - + - - - - - - - + - - - - + - - - - - - - - - - + - + - + - + - - - - + - - - - + - - - - + - + - - - - - - - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + - + - + - + - + - + - - - - + - - + + + + + + + + + + + + - - + + - - + + + + - - + + - + - + - + - - - - - - - - - - - + + - + - + - - - - + - - - - + - - - - + - - - - + - - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - - - - - - - - - - - - - - - - - + + - - + + - + - - - - + - + - - - - - - - - - - - - - - - - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + - + - + - + - - + + - - + + - - + + - - + + - - + + + + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - - - - - - + + - - + + - - + + - - + + - - - - + + - - + + - - + + - - - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - + - + - - - - + + + + - - + + - - + + - + - + - + - + - + @@ -77405,378 +80423,395 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - + + - - + + - + - - + + - + - - + + + + + + + + - + - - + + + + + - + - + - + - + - + - + - + - - - - - - + + + + + + - - - + + + - - - + + + - - + + - - - - + + + + - - - - - - + + + + + + - - - + + + - - - - - + + + + + - + + + + + + - + - + - + - - + + - - + + - - - - + + + + - - - + + + - - - + + + - - - + + + - + - - - + + + - - + + - - - + + + + + + + + - + - - + + - - - + + + + + + + + + + - - - - - + + + + + - - - + + + - - - - - + + + + + - - - + + + - - - - - + + + + + - - - + + + - - - - + + + + - - - - + + + + - - + + - - - + + + - - - - + + + + - - - + + + - + @@ -77784,621 +80819,852 @@ - - - + + + - - - - - - - - + + + + + + + + + + + + + + + + + - - + + - - + + - + - - - + + + - - - + + + - - + - - - - - - - - - - - - - - - - - - + + + + - - - - - + + + + + - - - - + + + + - - - - + + + + - - + + - + - + - - - - - + + + + + - - - - + + + + + - + - - - + + + - - - - - + + + + + - + - - - + + + - - - - - - + + + + + + - + - - + + - - - + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - + + + + + - - + + - + - - + + - - + + - + - - - + + + - - - + + + - + - + - - - - + + + + - - + + - - - - + + + + - - - + + + - + - - + + - - - + + + - + - - + + - - - + + + - - - + + + - - + + - - - - + + + + - - - - - - - - - - - + + + + + + + + + + + - + - - - + + + - + - - - + + + - + - + - + - + - + - + - + - - - - + + + + - - + + - + - + - - - - - + + + + + - + - - - - - - + + + + + + - - - - + + + + - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + - - + + + + + + + + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - + - - - - + + + + + + + - + - - + + - + - - - + + + - - - + + + + - - - - - - + + + + + + - - - - - + + + + + - - - - + + + + - - - + + + - - + + - - - + + + + + + + + - - - + + + - - + + - - + + - + - + - + - + - + - + - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + - - + + - - - + + + @@ -78407,264 +81673,267 @@ + + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - + - + - + - - - - - - - - - - - - - - + + + + + + + + + + + + + + - - - + + + - - - - - - + + + + + + - - + + - - + + - - - - - - + + + + + + - - - - - - + + + + + + - - - - - - + + + + + + - + - - + + - - + + - - - + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + @@ -78696,23 +81965,25 @@ - + + + - - - + + + - - + + - - - + + + @@ -78724,625 +81995,672 @@ - + - + - + - + - - - - - + + + + + - - - - - - + + + + + + - - + + - + - + - - - - + + + + - - - + + + - - - - + + + + + + + + + + + + + + - - - - + + + + - - - - + + + + + + + - - - - + 
+ + + - - - - + + + + - - + + + + + + + + + + - - - - - - + + + + + + + - - - - - + + + + + - - + + - - - + + + - - - + + + + + + - - - + + + - - - + + + - - - + + + - - - + + + - + - - + + - + - - - - - + + + + + - - + + - - - - - + + + + + - - - + + + - - - - - + + + + + - - - + + + - - + + - - - - - - + + - - - - - + + + + + - - - - - - - + + + + + + + - - - - + + + + - - + + - - - + + + - - - + + + - - - - + + + + - - - - - + + + + + - - - + + + - - + + - - - + + + - - + + - - + + - - + + - + + + + + + + + + - + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + + + + + + + + + + + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + - - - - - - - - - - - - - - - - + - + - + - + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + + + + + + + + + - + - + - - + + - + - + - + - - + + @@ -79356,9 +82674,9 @@ - - - + + + @@ -79384,906 +82702,978 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - + + + + - - - - - - - - - - - - - - - - + + + + - - - - + + + + + - - - - + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + - - - - + + + + - - - - + + + - - + + + + + + + + + + + + + + - - + + - + - + - + - + + - - - - - - - - - + + + + + + + + + - + - + - + - + - + - - + + - - - + + + - - - + + + - + + + + - - + + - - + + - - - + + + - + - - - - - - - + + + + + + + - - - + + + - - - - - + + + + + - - - - - - - - - + - + - + - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + - - - - - + + + + + - - - - + + + + - - - - + + + + - - + + - - - + + + - - - - + + + + - - + + - - - + + + - - - + + + - - - - + + + + - - - + + + - - - - - - + + + + + + - - - - - + + + + + - + - - - + + + - - - + + + - - - - - + + + + + - - - - - - + + + + + + - - - - - + + + + + - + - - + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - + + + + + + + - + - - - - - + + + + + - - - - + + + + - - + + - - - + + + - - - - - + + + + + - - - - + + + + - - - + + + - - - + + + - - - + + + - - + + - - - + + + - - + + - - - - + + + + - - - + + + - - - + + + - - + + - - - + + + - - + + - - - + + + - - + + - - - + + + - - + + - + - - - + + + - - - + + + - - - + + + - - + + - - - + + + - - + + - - + + - - - - + + + + - - - - - + + + + + - - - - + + + + - - - - + + + + - - - - - + + + + + - - - + + + - - - - + + + + - + - - - - + + + + - - - - + + + + - - - + + + - - - + + + - + - + - + - + - + - + - + - - - - - + + + + - - - + + + - - - + + + - - - + + + - - - - + + + + - - - - + + + + - - - - - - - - - + + + + + + + + + + + + + + + + + + + + - - + + - - - - - - - + + + + + + + - - + + - - - - - + + + + + - - - - - + + + + + - - - + + + - - + + - - - + + + - - + + - - - + + + - - + + - - - + + + - - - - - - - - + + - + - + - + - + - + - + - + - + - - + + - - - + + + - - - + + + - - - - - - - - - - - - + - + - - - - - - - - - - - - - - - - - - + - - - + + 
+ - - - - - - + + + + + + - - - + + + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + @@ -80298,157 +83688,168 @@ - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - + - + - + - + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -80456,228 +83857,229 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + - + - - - - - - + + + + + + - - - - - - + + + + + + - - - - - - + + + + + + - - - + + + - - - - - - + + + + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - - + + + + - - - + + + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -80686,53 +84088,53 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -80750,1166 +84152,1024 @@ - - - - - - - - - - - - - - + + + + + + + + + + + + + + - + - - + + - - - - + + + + - - - - - + + + + + - - - - - + + + + + - - - - + + + + - - - - - + + + + + - - - - + + + + - - - - - - + + + + + + + + + + + - + - + - + - + - + - + - - + + - - - + + + - + - + - + - + - + - + - + - + - + - + - + - - - - + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - - - - + + + + + - - - - + + + + - - - - + + + + + - - - - - + + + + + - - - - + + + + - - + + - + - - - - + + + + - - - - + + + + - - + + - + - + - + - - - + + + - - - + + + - - + + - - - - - - - + + - + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - - - - - + - + - + - - - + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - + + + + + - - - - + + + + - - - + + + - - - + + + - - - - - - - - - - - + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - + - + - + - + - - - - - - - - - - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - - - - - + + + + + - - - - + + + + - - - + + + - - - + + + - - - + + + - - - - + + + + - - - - - - + + + + + + - - - + + + - - - - - - + + + + + + - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - - - + + + - - + + - + - - - - - - + + + + + + - - - - - - + + + + + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + - - - - - - - + - + - + - + - + + + + - - + - + - + - + - + - + - + - + - + - + + - + - + - + - + - + - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - - - + + + + + + - - - - - - - + + + + + + + - + - + - - + + - - - + + + - - 
- + + + - - - - + + + + - - - - - + + + + + - - + + - - + + - - - + + + + + + + + + + + + + + + + + + + + + + - - - - - + + + + + - - - - - + + + + + - - - - + + + + - - - - + + + + - - - - + + + + - - - - - - + + + + + + - - - - + + + + - + - - - + + + - - - + + + - - + + - - + + - + - + - - + + @@ -81920,2365 +85180,1740 @@ - - - - - - - - - - - - - - + + + + + + + + + + - - - - + + + + - + - - + + - - - + + + - - - + + + - - - - - + + + + + - - - + + + - + - - - - - + + + + + - - - - - - - - - - + + - - - + + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - - + + - - + + + + + - + - + - + - + - - - - + - - - - + - + - - - - - - - + - - - - - - - - - - - - - - + + - - + + + + - - + + - - + + - - + + - + - + - + - + - + - + - + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - - + + - - + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - + - + - + - + - - - - + - - + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + - - - - - + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + - - - - + + + + + - - - - - - - - - - - - - - - - - - - - + + + + + + + + + - - - - - - + + + + + + - + - - - - + + + + - + - + - - - - - - - + + + + + + + - - - - + + + + - - - - - + + + + + - - - - - - + + + + + + - - - + + + - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - + + + + + + + + + - - - - + + + + - - - - - + + + + + - - - - + + + + - - - - - - + + + + + 
+ - - - + + + - - - - - - - + + + + + + + - - - - + + + + - - - - - + + + + + - - - - - - + + + + + + - - - - - + + + + + - - - + + + - - - + + + - - + + + + + + + + + + + + + - - + + - + - + - + - + - + - - - - - - - - - - - - - - - - - - + + + + - + - + - - + + - - - - + + + + - - - - + + + + - + - - + + - + - + - - - - - + + + - - + + - + - - - - - - + + - - + + - - + + - - + + - - + + - + - + - - + + - - + + - + - - + + - + - + - - - - - - - - - - + + - - - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - + - + - + - - - - - - - - - - + - + + + + - + + + + - + + + + - + - + - + - + - - - - - - - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + + + + + + + - + + + + - + - + - + - + - + - - + + - - + + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - - + + - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - + - - + + - - - - + - - - - - - - + - - + + - - + + - - + + - - + + - - + + - - + + - - - - + - + + + + - + + + + + + + - + + + + + + + - - + + @@ -84295,201 +86930,213 @@ - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -84499,16 +87146,16 @@ - + - + - + - - + + @@ -84522,878 +87169,884 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + - + - + - + 
- + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -85406,143 +88059,143 @@ - - + + - + - + - + - + - - + + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -85550,766 +88203,772 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - - + + @@ -86317,1321 +88976,1324 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + - - + + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - + + + - - + + - - - + + + - + - - - + + + - - + + - - - + + + - - + + - - - + + + - - + + - - - + + + - + - - - + + + - - - + + + - - - + + + - - - + + + - - - - - + + + + + - - + + - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - - - - + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - + + + + - - - - - - - - - + + + + + + + + + - + - - - - - + + + + + - - - - - - - - + + + + + + + + - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + - - + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - - - + + + + + - - - - + + + + - - - - - - - + + + + + + + - + - - + + - - + + - + - - + + - + - - - + + + - - - + + + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -87767,11 +90429,17 @@ - - + + + + + + + + - - + + @@ -88054,726 +90722,743 @@ - - - + + + + + + - + - + - + - + - + - + - - - - - - - - - - + + + + + + + + + + - - - + + + - - - - - - + + + + + + - - - + + + - - - - + + + + - - - - - - + + + + + + - - - - - + + + + + - - - - - + + + + + - - - - - + + + + + - - - - - - - + + + + + + + - - - - - + + + + + - - - - - - + + + + + + - - - + + + - - - - - - + + + + + + - - - + + + - - - + + + - - - - - + + + + + + + + + + + - - + + - + - + - + - - - - - + + + + + + + + + + + + + - - - + + + - - + + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + @@ -88787,2060 +91472,1783 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - - + + - - - + + + - - - + + + - - - - - - - + + + + + + + - + - - - + + + - - + + - - - + + + - - - + + + - - - + + + - + - + - + - + - + - + - - - - + + + + - - - - - + + + + + - - - - - 
- - - - - - - - - - - - + - + - - - - - - - - - - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + - - - - - - - + - - + + - - + + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + - + - - - - + + - - + + - - - - + + - - + + - - + + + + + - + - - - - - - - - - - + - + - - - - + + - + - - + + - - + + - + - - + + - - + + - - + + - - + + - - + + - - + + + + + - + - + - - + + - - + + - + - - - - + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + - + - - - - + - - - - - + + - - + + + + - - + + - + - + - - + + - - + + - - + + - - + + + + + + + - + - + - + - + - + - + - + - + - + - + - - - - - + + - - + + - - + + - + - + - - - - - + + - - + + - - - - + + - - + + - - + + - - - - + + - - + + - - + + - - + + - + - - - - + - + - + - + - + - + - - + + - + - + - - - - - - - - - - - - - - - - + - + - - - - - - - - - - - - - + - + - - - - - - - + - + - + - - - - - + + - + - + - - + + + + + - + - + - + - + + + + - + + + + - + - - + + - - - - - - - + - - + + - - + + + + - - + + - - + + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + + - + - - - - + - + - + - - - - - - - - - - - - - - - - - - - + - - - - - + + - - + + - - + + - - + + - - + + - - + + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - - + + - - - - + + - - + + - - - - + + - - + + - - + + - - + + - - + + - - + + - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - - + + - - + + - + - - + + - - + + - - + + - - + + - - + + - + - + - - + + - + - + - + - + - + - + - + - - - + + + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - + + + + + + + + + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - - - - - - - - + + + + + + + + + + - + - - - - + + + + - - - - + + + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - + + + + + + - - + 
+ - - - + + + - - - - - + + + + + - - - - - + + + + + - - - + + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - - - - + + + + - + - - - - - - + + + + + + - - - - - + + + + + - - + + - - - - + + + + - + - - - - + + + + - - + + - - - + + + - - + + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -90849,625 +93257,639 @@ - - - - - - + + + + + + + + + + + - - - - - - - + + + + + + + - - + + - + - + - + - + - + - - + + - - - + + + - - - - + + + + - + + + + + - - + + - - + - - - - + - - + + - + + - + - - + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + - - - - - + + + + + - - - - - + + + + + - - - - - + + + + + - - - - + + + + - - - - - - - + + + + + + + - - - - - - - + + + + + + + - - - - - - - - + + + + + + + + - + - + - + - - - + + + - - - - - + + + + + - + - - - + + + - - + + - - + + - - - - - - - - - - + + + + + + + + + + - + - - - - - - + + + + + + - - - - + + + + - - - - + + + + - - - + + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + - - + + - - - - - + + + + + - - - - - + + + + + - - - - - - - + + + + + + + - - + + + + + - - - - + - - - - - - + + + + + + - - - - - - - + + + + + + + - - - - + + + + - - - + + + - - - - - - - - + + + + + + + + - - + + - - - + + + - + - + - + - - - - + + + + - - + + + + + + + + + - - + + - - - - - - + + - - + + - - - + + - + + - + - + - + - + - - + + - - + + - - + + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - + + @@ -91507,1035 +93929,679 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - - - - + - + + + + - + - + - + - + - + - + - + - + - + - + - + + + + - + - + - - + + - + - - - - + - + - + + + + - + - + - - - - + - + - + - + - + - + - - - - + - + - - + + - - + + - - + + + + + + + + - + - + - + - + - - + + - - + + - - + + - - + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + - - + + - - + + - + - + - - + + - + - + - + - + - + - + - + - + - + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - + + @@ -92545,66 +94611,66 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -92612,19 +94678,19 @@ - + - + - + - + @@ -92635,7 +94701,7 @@ - + @@ -92643,18 +94709,18 @@ - + - + - + - + - + @@ -92665,143 +94731,143 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -92809,10 +94875,10 @@ - + - + @@ -92833,35 +94899,35 @@ - + - + - + - + - + - + - + - + - + @@ -92872,21 +94938,21 @@ - + - + - + - + - + - + @@ -92894,7 +94960,7 @@ - + @@ -92902,23 +94968,23 @@ - + - + - + - + - + @@ -92936,13 +95002,13 @@ - + - + @@ -92951,18 +95017,18 @@ - + - + - + - + @@ -92977,15 +95043,15 @@ - + - + - + @@ -93024,7 +95090,7 @@ - + @@ -93032,19 +95098,19 @@ - + - + - + - + @@ -93059,10 +95125,10 @@ - + - + @@ -93073,25 +95139,25 @@ - + - + - + - + - + - + - + @@ -93118,16 +95184,16 @@ - + - + - + @@ -93156,7 +95222,7 @@ - + @@ -93179,1366 +95245,2085 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + + + + + + + - + + + + - + + + + + + + + + + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + + + + + + + 
+ + + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + - + + + + - + - + - + - + + + + - + + + + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + + + + - + - - + + - - + + - - + + + + + - + + + + - + - - - - + - + - - + + - - - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - + - - - - + - + - + - - + + - - + + - - - - + + - - + + - - + + - - + + - - - - - - - - - - - - + + - - + + - - + + - - - - + + - - + + - - + + - - + + - - - - + + - - + + - - + + - - - - + + - - + + - - + + - - + + - - + + - - + + + + - - + + - - + + - + - + - + + + + - + - + - + - + - + - + - + - - - - + - - - - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - + + - - + + - - + + - - + + - - + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + - - - - - - - + - - - - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - - - - - - - - - - - - - - - - - - - - - - - - + + - - + + - - - - + + - - + + - - - - + + + + - - + + - - + + - + - - - - - + + - - + + - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + + - - - - - - + + + + + + - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/abi_gki_aarch64_cuttlefish_whitelist b/abi_gki_aarch64_cuttlefish_whitelist index b795c2c9f18ec4c99e93f1dcd60c59d637f7c978..dee1421a79791e3de6f88e345f0156c3a55cc1e4 100644 --- a/abi_gki_aarch64_cuttlefish_whitelist +++ b/abi_gki_aarch64_cuttlefish_whitelist @@ -22,7 +22,6 @@ __cpu_online_mask cpus_read_lock cpus_read_unlock - debug_smp_processor_id delayed_work_timer_fn destroy_workqueue _dev_err @@ -124,8 +123,6 @@ platform_device_unregister __platform_driver_register platform_driver_unregister - preempt_count_add - preempt_count_sub preempt_schedule preempt_schedule_notrace prepare_to_wait_event @@ -437,7 +434,6 @@ copy_page dev_driver_string devres_add - __devres_alloc_node devres_destroy devres_free dma_buf_get @@ -610,7 +606,6 @@ kmem_cache_free memdup_user put_unused_fd - remove_conflicting_framebuffers sg_alloc_table_from_pages sync_file_create sync_file_get_fence diff --git a/abi_gki_aarch64_qcom_whitelist b/abi_gki_aarch64_qcom_whitelist index 78d2e57390151aa71d757eb9025ab08e3908495b..f9fc1466fc3e6b5d88e3dd705906d456e56de9da 100644 --- a/abi_gki_aarch64_qcom_whitelist +++ b/abi_gki_aarch64_qcom_whitelist @@ -16,7 +16,6 @@ __arch_copy_from_user __arch_copy_to_user arch_setup_dma_ops - arch_timer_read_ool_enabled arm64_const_caps_ready atomic_notifier_call_chain atomic_notifier_chain_register @@ -48,6 +47,7 @@ cdev_add cdev_del cdev_init + __cfi_slowpath __check_object_size __class_create class_destroy @@ -108,22 +108,6 @@ crypto_destroy_tfm crypto_shash_setkey _ctype - debugfs_attr_read - debugfs_attr_write - debugfs_create_atomic_t - debugfs_create_bool - debugfs_create_dir - debugfs_create_file - debugfs_create_file_unsafe - debugfs_create_u16 - debugfs_create_u32 - debugfs_create_u64 - debugfs_create_u8 - debugfs_create_x32 - debugfs_create_x8 - debugfs_print_regs32 - debugfs_remove - debugfs_remove_recursive delayed_work_timer_fn del_timer del_timer_sync @@ -246,6 +230,7 @@ dma_fence_signal_locked __dma_flush_area __dma_inv_area + dmam_alloc_coherent dma_release_from_dev_coherent dma_request_slave_channel do_exit @@ -258,8 +243,6 @@ drm_panel_notifier_unregister dst_release dummy_dma_ops - __dynamic_dev_dbg - __dynamic_pr_debug enable_irq ether_setup eth_type_trans @@ -291,23 +274,9 @@ freezing_slow_path fwnode_property_read_u32_array gcd - generic_file_llseek generic_handle_irq - geni_abort_m_cmd - geni_cancel_m_cmd - geni_se_dump_dbg_regs - geni_se_init - geni_se_iommu_map_buf - geni_se_iommu_unmap_buf - geni_se_resources_init - geni_se_rx_dma_prep - geni_se_rx_dma_unprep - geni_se_select_mode - geni_setup_m_cmd - geni_se_tx_dma_prep - geni_se_tx_dma_unprep - geni_write_reg - genlmsg_put + genl_register_family + genl_unregister_family gen_pool_add_virt gen_pool_alloc gen_pool_create @@ -318,9 +287,7 @@ __get_free_pages get_pid_task get_random_bytes - get_se_proto __get_task_comm - get_tx_fifo_depth get_unused_fd_flags gpiochip_add_data_with_key gpiochip_add_pin_range @@ -345,7 +312,6 @@ handle_edge_irq handle_level_irq handle_nested_irq - hex2bin hex_dump_to_buffer hrtimer_active hrtimer_cancel @@ -470,12 +436,9 @@ kstrtoint kstrtoll kstrtos8 - kstrtos8_from_user kstrtou16 kstrtou8 - kstrtou8_from_user kstrtouint - kstrtouint_from_user kstrtoull kthread_cancel_work_sync
kthread_create_on_node @@ -486,7 +449,6 @@ kthread_stop kthread_worker_fn ktime_get - ktime_get_coarse_real_ts64 ktime_get_real_ts64 ktime_get_with_offset kvfree @@ -542,7 +504,6 @@ module_put __msecs_to_jiffies msleep - msleep_interruptible __mutex_init mutex_lock mutex_lock_interruptible @@ -572,9 +533,9 @@ nla_put __nlmsg_put no_llseek - nonseekable_open nr_cpu_ids ns_capable + ns_to_timespec64 ns_to_timespec nvmem_cell_get nvmem_cell_put @@ -749,6 +710,7 @@ _raw_spin_lock_bh _raw_spin_lock_irq _raw_spin_lock_irqsave + _raw_spin_trylock _raw_spin_trylock_bh _raw_spin_unlock _raw_spin_unlock_bh @@ -790,6 +752,7 @@ __regmap_init regmap_multi_reg_write regmap_raw_read + regmap_raw_write regmap_read regmap_update_bits_base regmap_write @@ -798,6 +761,7 @@ regulator_disable regulator_enable regulator_get + regulator_get_voltage regulator_is_enabled regulator_put regulator_set_load @@ -817,8 +781,6 @@ rtc_time64_to_tm rtc_tm_to_time64 rtc_valid_tm - rt_mutex_lock - rt_mutex_unlock rtnl_is_locked rtnl_unlock saved_command_line @@ -827,18 +789,12 @@ schedule schedule_timeout scnprintf - se_config_packing - se_geni_clks_off - se_geni_resources_off - se_geni_resources_on seq_lseek seq_open seq_printf - seq_putc seq_puts seq_read seq_release - seq_write set_normalized_timespec set_user_nice sg_alloc_table @@ -848,15 +804,7 @@ sg_next __sg_page_iter_next __sg_page_iter_start - simple_attr_open - simple_attr_read - simple_attr_release - simple_attr_write - simple_open simple_read_from_buffer - simple_write_to_buffer - single_open - single_release sk_alloc skb_add_rx_frag skb_clone @@ -969,6 +917,7 @@ sysfs_remove_group sysfs_remove_groups sysfs_streq + system_freezable_wq system_freezing_cnt system_highpri_wq system_long_wq @@ -998,16 +947,22 @@ trace_event_reg trace_handle_return trace_print_flags_seq + trace_print_hex_seq + trace_print_symbols_seq trace_raw_output_prep trace_seq_printf try_module_get + tty_flip_buffer_push typec_register_partner typec_register_port typec_set_data_role typec_set_pwr_role typec_unregister_partner + uart_add_one_port + uart_register_driver + uart_remove_one_port + uart_unregister_driver __udelay - uncached_logk __unregister_chrdev unregister_chrdev_region unregister_inet6addr_notifier @@ -1035,7 +990,6 @@ usb_free_all_descriptors usb_function_register usb_function_unregister - usb_func_wakeup usb_gadget_wakeup usb_get_dr_mode usb_interface_id @@ -1125,7 +1079,6 @@ iommu_group_remove_device iommu_group_set_iommudata iommu_put_dma_cookie - kstrtoull_from_user of_dma_is_coherent of_n_addr_cells of_n_size_cells @@ -1180,9 +1133,6 @@ # required by cam_flash.ko thermal_zone_get_cdev_by_name -# required by cam_ife_csid.ko - ns_to_timespec64 - # required by cam_ife_hw_mgr.ko __ll_sc_atomic_fetch_add @@ -1199,6 +1149,10 @@ led_trigger_register_simple led_trigger_unregister_simple +# required by cam_smmu_api.ko + iommu_dma_enable_best_fit_algo + iommu_dma_reserve_iova + # required by cam_utils.ko gpio_free_array of_clk_get_from_provider @@ -1206,13 +1160,13 @@ __request_region # required by citadel-spi.ko + nonseekable_open spi_bus_lock spi_bus_unlock spi_sync_locked # required by clk-qcom.ko clk_aggregate_rate - clk_debug_print_hw __clk_determine_rate clk_fixed_rate_ops clk_hw_get_flags @@ -1276,18 +1230,6 @@ send_sig_info time64_to_tm -# required by dm-default-key.ko - bio_crypt_alloc_ctx - blk_crypto_evict_key - blk_crypto_init_key - dm_get_device - dm_put_device - dm_read_arg_group - dm_register_target - dm_shift_arg - dm_table_get_mode - dm_unregister_target - # required 
by dwc3-haps.ko pcim_enable_device __pci_register_driver @@ -1323,7 +1265,6 @@ pinctrl_pm_select_default_state pinctrl_pm_select_sleep_state strcat - system_freezable_wq usb_add_gadget_udc usb_del_gadget_udc usb_ep_set_maxpacket_limit @@ -1368,12 +1309,7 @@ xt_unregister_target # required by eud.ko - tty_flip_buffer_push - uart_add_one_port uart_insert_char - uart_register_driver - uart_remove_one_port - uart_unregister_driver # required by event_timer.ko cpumask_next_and @@ -1385,7 +1321,9 @@ of_find_i2c_device_by_node # required by ftm5.ko + ktime_get_coarse_real_ts64 proc_create + seq_write # required by google-battery.ko simple_strtoull @@ -1396,12 +1334,6 @@ # required by governor_bw_hwmon.ko argv_free argv_split - __tracepoint_bw_hwmon_meas - __tracepoint_bw_hwmon_update - -# required by governor_memlat.ko - __tracepoint_memlat_dev_meas - __tracepoint_memlat_dev_update # required by gpi.ko dma_async_device_register @@ -1437,11 +1369,13 @@ i2c_put_dma_safe_msg_buf # required by ion-alloc.ko - dentry_path dma_buf_export dma_get_device_base dma_get_size + __ll_sc_atomic64_sub_return + mm_event_count __next_zones_zonelist + ptr_to_hashval sched_setattr split_page vm_map_ram @@ -1452,6 +1386,7 @@ add_wait_queue alloc_etherdev_mqs eth_mac_addr + kstrtos8_from_user pci_clear_master pci_disable_device pci_enable_device @@ -1471,7 +1406,7 @@ # required by lpm-stats.ko kobject_get module_ktype - on_each_cpu + simple_write_to_buffer # required by machine_dlkm.ko devm_snd_soc_register_card @@ -1483,6 +1418,7 @@ snd_soc_dai_set_pll snd_soc_dai_set_sysclk snd_soc_dai_set_tdm_slot + snd_soc_get_pcm_runtime snd_soc_of_get_dai_link_codecs snd_soc_of_parse_audio_routing snd_soc_of_parse_card_name @@ -1538,7 +1474,6 @@ bpf_trace_run10 _cleanup_srcu_struct __clk_get_name - debugfs_lookup devfreq_cooling_unregister device_show_int device_store_int @@ -1566,7 +1501,7 @@ __ll_sc_atomic_or mmap_min_addr mmput - noop_llseek + mm_trace_rss_stat of_devfreq_cooling_register plist_del rb_last @@ -1577,21 +1512,18 @@ sysfs_create_bin_file sysfs_remove_bin_file sysfs_remove_files - trace_print_symbols_seq unmapped_area_topdown unregister_shrinker vm_insert_page vm_insert_pfn # required by msm_bus_rpmh.ko - __msm_bus_scale_client_update_request_cb - __msm_bus_scale_register_cb - __msm_bus_scale_register_client_cb - __msm_bus_scale_update_bw_cb of_clk_get_by_name raw_notifier_call_chain raw_notifier_chain_register raw_notifier_chain_unregister + rt_mutex_lock + rt_mutex_unlock # required by msm_drm.ko adjust_managed_page_count @@ -1599,7 +1531,6 @@ bpf_trace_run12 __clk_get_hw clk_get_parent - debugfs_create_size_t device_create_with_groups devm_clk_bulk_get devm_of_pwm_get @@ -1863,7 +1794,6 @@ shmem_truncate_range strreplace timespec64_to_jiffies - tracing_off unmap_kernel_range unmap_mapping_range vm_get_page_prot @@ -1873,15 +1803,25 @@ # required by msm_ext_display.ko devm_extcon_dev_unregister -# required by msm_gsi.ko - kstrtoint_from_user - -# required by msm_icnss.ko - dmam_alloc_coherent +# required by msm_geni_serial.ko + console_stop + console_suspend_enabled + do_SAK + handle_sysrq + oops_in_progress + __tty_insert_flip_char + tty_insert_flip_string_fixed_flag + uart_console_device + uart_console_write + uart_get_baud_rate + uart_parse_options + uart_resume_port + uart_set_options + uart_suspend_port + uart_update_timeout + uart_write_wakeup # required by msm_ipc_logging.ko - debugfs_file_get - debugfs_file_put _raw_read_lock_irq _raw_read_unlock_irq _raw_write_lock_irqsave @@ -1893,14 +1833,13 @@ 
__tracepoint_clock_set_rate # required by msm_minidump.ko - linux_banner + linux_banner_ptr # required by msm_npu.ko kmem_cache_create_usercopy # required by msm_pm.ko arm_cpuidle_suspend - clock_debug_print_enabled cpu_do_idle cpuidle_dev cpuidle_register_device @@ -1913,10 +1852,8 @@ param_get_bool param_get_uint pending_ipi - pm_gpio_debug_mask pm_qos_request_for_cpu pm_qos_request_for_cpumask - regulator_debug_print_enabled s2idle_set_ops set_update_ipi_history_callback suspend_set_ops @@ -1934,10 +1871,6 @@ hwrng_register hwrng_unregister -# required by msm_rtb.ko - arch_timer_read_counter - set_uncached_logk_func - # required by msm_scm.ko __arm_smccc_smc @@ -1969,14 +1902,10 @@ irq_chip_set_wake_parent irq_create_fwspec_mapping irq_domain_free_irqs_top - msm_gpio_dump_builtin_cb of_irq_domain_map register_restart_handler unregister_restart_handler -# required by pinctrl-spmi-gpio.ko - pmic_gpio_dump_builtin_cb - # required by platform_dlkm.ko __ll_sc___cmpxchg_case_mb_8 of_property_read_variable_u16_array @@ -2115,7 +2044,6 @@ # required by qpnp-smb5-charger.ko iio_channel_release - regulator_get_voltage # required by qpnp_pdphy.ko device_get_named_child_node @@ -2179,9 +2107,7 @@ csum_tcpudp_nofold __dev_get_by_index dev_queue_xmit - __dynamic_netdev_dbg - genl_register_family - genl_unregister_family + genlmsg_put get_current_napi_context gro_cells_destroy gro_cells_init @@ -2201,11 +2127,11 @@ skb_append_pagefrags skb_checksum synchronize_rcu - trace_print_hex_seq unregister_netdevice_many # required by rndis.ko dev_get_stats + print_hex_dump_bytes # required by roles.ko class_find_device @@ -2424,7 +2350,6 @@ # required by snd-soc-wm-adsp.ko regmap_async_complete - regmap_raw_write regmap_raw_write_async snd_compr_stop_error snd_soc_component_disable_pin @@ -2432,13 +2357,6 @@ # required by spi-geni-qcom.ko dma_release_channel - geni_read_reg - geni_se_clk_freq_match - geni_se_qupv3_hw_version - get_rx_fifo_depth - get_tx_fifo_width - se_geni_clks_on - se_get_packing_config __spi_alloc_controller spi_register_controller spi_unregister_controller @@ -2450,7 +2368,6 @@ spmi_controller_remove # required by st21nfc.ko - desc_to_gpio device_set_wakeup_capable # required by subsystem-restart.ko @@ -2480,7 +2397,6 @@ typec_set_vconn_role typec_unregister_altmode typec_unregister_port - usb_debug_root # required by tps-regulator.ko gpiod_export @@ -2488,21 +2404,44 @@ regulator_list_voltage_table regulator_map_voltage_ascend -# required by ufs_qcom.ko - ufsdbg_pr_buf_to_std - ufshcd_dme_get_attr - ufshcd_dme_set_attr - ufshcd_get_local_unipro_ver - ufshcd_hold - ufshcd_pltfrm_init - ufshcd_pltfrm_resume - ufshcd_pltfrm_runtime_idle - ufshcd_pltfrm_runtime_resume - ufshcd_pltfrm_runtime_suspend - ufshcd_pltfrm_shutdown - ufshcd_pltfrm_suspend - ufshcd_release - ufshcd_remove +# required by ufshcd-core.ko + async_schedule + bio_crypt_should_process + blk_queue_max_segment_size + blk_queue_rq_timeout + blk_queue_update_dma_pad + dev_pm_opp_remove + down_read_trylock + down_write_trylock + keyslot_manager_create + keyslot_manager_destroy + keyslot_manager_private + keyslot_manager_reprogram_all_keys + keyslot_manager_set_max_dun_bytes + __ll_sc_atomic64_fetch_andnot_release + __ll_sc_atomic64_fetch_or_acquire + __scsi_add_device + scsi_add_host_with_dma + scsi_block_requests + scsi_change_queue_depth + scsi_device_get + scsi_device_put + scsi_dma_map + scsi_dma_unmap + __scsi_execute + scsi_host_alloc + scsi_host_put + scsi_print_command + scsi_print_sense_hdr + scsi_remove_device + 
scsi_remove_host + scsi_report_bus_reset + scsi_scan_host + scsi_set_cmd_timeout_override + scsi_unblock_requests + sdev_prefix_printk + trace_output_call + utf16s_to_utf8s # required by wlan.ko bitmap_print_to_pagebuf @@ -2538,7 +2477,6 @@ cfg80211_unlink_bss cfg80211_update_owe_info_event cfg80211_vendor_cmd_reply - cld80211_get_genl_family complete_and_exit cpufreq_quick_get_max __cpuhp_remove_state @@ -2550,10 +2488,9 @@ crypto_alloc_skcipher crypto_shash_final crypto_shash_update - default_llseek - deregister_cld_cmd_cb dev_alloc_name dump_stack + hex2bin hex_to_bin ieee80211_channel_to_frequency ieee80211_frequency_to_channel @@ -2561,6 +2498,7 @@ ieee80211_hdrlen irq_set_affinity_hint mac_pton + msleep_interruptible netif_tx_stop_all_queues netlink_broadcast __netlink_kernel_create @@ -2579,17 +2517,16 @@ pm_system_wakeup proc_create_data proc_mkdir - _raw_spin_trylock - register_cld_cmd_cb register_netevent_notifier register_sysctl_table regulatory_set_wiphy_regd rtnl_lock save_stack_trace_tsk schedule_timeout_interruptible - seq_hex_dump seq_vprintf set_cpus_allowed_ptr + single_open + single_release skip_spaces strchrnul unregister_netevent_notifier @@ -2635,10 +2572,10 @@ # required by usb_f_gsi.ko dev_get_by_name - kstrtou16_from_user usb_composite_setup_continue usb_ep_autoconfig_by_name usb_func_ep_queue + usb_func_wakeup usb_gsi_ep_op # required by usb_f_mtp.ko diff --git a/abi_gki_aarch64_whitelist b/abi_gki_aarch64_whitelist index 233ea230c8b57a8c7d7d23ec7ad87eb8752bbb88..9f00353c4e5519df4f5b93347764af1c71ca6041 100644 --- a/abi_gki_aarch64_whitelist +++ b/abi_gki_aarch64_whitelist @@ -1,4 +1,221 @@ [abi_whitelist] # commonly used symbols + __cfi_slowpath + __const_udelay + _dev_err + devm_ioremap_resource + devm_kmalloc + devm_request_threaded_irq + _dev_warn + dummy_dma_ops + find_next_bit + find_next_zero_bit + kfree + __kmalloc + kmalloc_caches + kmem_cache_alloc_trace + ktime_get + __list_add_valid + __ll_sc_atomic64_andnot + __ll_sc_atomic_add_return module_layout + __mutex_init + mutex_lock + mutex_unlock + of_find_property + of_property_read_variable_u32_array + platform_get_irq + platform_get_resource + printk __put_task_struct + ___ratelimit + _raw_spin_lock_irqsave + _raw_spin_unlock_irqrestore + snprintf + __stack_chk_fail + __stack_chk_guard + strcmp + __udelay + +# required by arm-smmu.ko + alloc_io_pgtable_ops + amba_bustype + bus_set_iommu + devm_free_irq + _dev_notice + driver_find_device + driver_for_each_device + free_io_pgtable_ops + generic_device_group + iommu_alloc_resv_region + iommu_device_link + iommu_device_register + iommu_device_sysfs_add + iommu_device_unlink + iommu_dma_get_resv_regions + iommu_fwspec_add_ids + iommu_fwspec_free + iommu_fwspec_init + iommu_get_dma_cookie + iommu_group_get_for_dev + iommu_group_put + iommu_group_ref_get + iommu_group_remove_device + iommu_present + iommu_put_dma_cookie + __ll_sc_atomic64_fetch_or + of_device_get_match_data + of_dma_is_coherent + of_phandle_iterator_args + of_phandle_iterator_init + of_phandle_iterator_next + param_ops_bool + param_ops_int + pci_bus_type + pci_device_group + pci_for_each_dma_alias + pci_request_acs + platform_bus_type + __platform_driver_register + platform_driver_unregister + put_device + +# required by ufshcd-core.ko + __alloc_workqueue_key + async_schedule + bio_crypt_should_process + blk_queue_max_segment_size + blk_queue_update_dma_pad + bpf_trace_run2 + bpf_trace_run4 + bpf_trace_run5 + bpf_trace_run8 + cancel_delayed_work + cancel_delayed_work_sync + cancel_work_sync + 
clk_disable + clk_enable + clk_prepare + clk_set_rate + clk_unprepare + complete + cpu_number + __cpu_online_mask + delayed_work_timer_fn + destroy_workqueue + dev_driver_string + devfreq_add_device + devfreq_remove_device + devfreq_resume_device + devfreq_suspend_device + device_create_file + device_remove_file + devm_clk_get + devm_kfree + devm_regulator_get + dev_pm_opp_add + dev_pm_opp_remove + dmam_alloc_coherent + down_read + down_read_trylock + down_write + event_triggers_call + find_last_bit + finish_wait + flush_work + free_irq + __init_rwsem + init_timer_key + init_wait_entry + __init_waitqueue_head + jiffies + jiffies_to_usecs + keyslot_manager_create + keyslot_manager_destroy + keyslot_manager_private + keyslot_manager_reprogram_all_keys + keyslot_manager_set_max_dun_bytes + kstrtouint + kstrtoull + __ll_sc_atomic64_fetch_andnot_release + __ll_sc_atomic64_fetch_or_acquire + __ll_sc_atomic_sub_return + memcpy + memset + memzero_explicit + __msecs_to_jiffies + msleep + perf_trace_buf_alloc + perf_trace_run_bpf_submit + __pm_runtime_idle + __pm_runtime_resume + preempt_schedule_notrace + prepare_to_wait_event + print_hex_dump + queue_delayed_work_on + queue_work_on + _raw_spin_lock + _raw_spin_unlock + regulator_count_voltages + regulator_disable + regulator_enable + regulator_set_load + regulator_set_voltage + request_threaded_irq + schedule + schedule_timeout + __scsi_add_device + scsi_add_host_with_dma + scsi_block_requests + scsi_change_queue_depth + scsi_device_get + scsi_device_put + scsi_dma_map + scsi_dma_unmap + __scsi_execute + scsi_host_alloc + scsi_host_put + scsi_print_command + scsi_print_sense_hdr + scsi_remove_device + scsi_remove_host + scsi_report_bus_reset + scsi_scan_host + scsi_unblock_requests + sdev_prefix_printk + sg_next + sprintf + strcpy + strlcpy + strlen + strncmp + sysfs_create_groups + sysfs_remove_groups + system_wq + trace_define_field + trace_event_buffer_commit + trace_event_buffer_reserve + trace_event_ignore_this_pid + trace_event_raw_init + trace_event_reg + trace_handle_return + trace_print_hex_seq + trace_print_symbols_seq + trace_raw_output_prep + trace_seq_printf + up_read + up_write + usleep_range + utf16s_to_utf8s + wait_for_completion_timeout + __wake_up + __warn_printk + +# required by ufshcd-pltfrm.ko + _dev_info + kstrdup + of_get_property + of_parse_phandle + of_property_read_string_helper + pm_runtime_enable + __pm_runtime_set_status diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi index c9322a56300dc45bde2e7fc55953186031440645..f9add515301fdef46e6d383b2d73f33785595f9a 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi @@ -476,6 +476,7 @@ "dsi0_ddr2", "dsi0_ddr"; + status = "disabled"; }; thermal: thermal@7e212000 { diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 00d44a60972f7afa122be1079593227ae39fcc21..e64ff80c83c542cc379020d9683a39e2b97e2e77 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -1013,9 +1013,8 @@ compatible = "fsl,imx6q-fec"; reg = <0x02188000 0x4000>; interrupt-names = "int0", "pps"; - interrupts-extended = - <&intc 0 118 IRQ_TYPE_LEVEL_HIGH>, - <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>, + <0 119 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX6QDL_CLK_ENET>, <&clks IMX6QDL_CLK_ENET>, <&clks IMX6QDL_CLK_ENET_REF>; diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi index 
5f51f8e5c1faeee6bfff1f55d94134ea93292688..d91f92f944c53ee6c9d20068e56c0436ecd7ee31 100644 --- a/arch/arm/boot/dts/imx6qp.dtsi +++ b/arch/arm/boot/dts/imx6qp.dtsi @@ -77,7 +77,6 @@ }; &fec { - /delete-property/interrupts-extended; interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>, <0 119 IRQ_TYPE_LEVEL_HIGH>; }; diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts index 49547a43cc90ad0e865266350bcb2608cab62cfc..54cbdaf7ffdccea759ac13afcb286bc20c616eb8 100644 --- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts +++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts @@ -318,8 +318,8 @@ }; ®_dldo3 { - regulator-min-microvolt = <2800000>; - regulator-max-microvolt = <2800000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; regulator-name = "vdd-csi"; }; diff --git a/arch/arm/include/asm/clocksource.h b/arch/arm/include/asm/clocksource.h index 0b350a7e26f37c1dfec80cb85610add6b9bbef99..13651c731a813c224561ff8048270c8044fdd982 100644 --- a/arch/arm/include/asm/clocksource.h +++ b/arch/arm/include/asm/clocksource.h @@ -1,8 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_CLOCKSOURCE_H #define _ASM_CLOCKSOURCE_H -struct arch_clocksource_data { - bool vdso_direct; /* Usable for direct VDSO access? */ -}; +#include -#endif +#endif /* _ASM_CLOCKSOURCE_H */ diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index d2453e2d3f1f3804db034c945f268ae614ff3c3f..a54230e65647955a0f0ff3f4f40afcdc7cda43ac 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h @@ -50,25 +50,7 @@ #ifdef CONFIG_CPU_CP15 -#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \ - "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 -#define __ACCESS_CP15_64(Op1, CRm) \ - "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64 - -#define __read_sysreg(r, w, c, t) ({ \ - t __val; \ - asm volatile(r " " c : "=r" (__val)); \ - __val; \ -}) -#define read_sysreg(...) __read_sysreg(__VA_ARGS__) - -#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) -#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__) - -#define BPIALL __ACCESS_CP15(c7, 0, c5, 6) -#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0) - -#define CNTVCT __ACCESS_CP15_64(1, c14) +#include extern unsigned long cr_alignment; /* defined in entry-armv.S */ diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index cb2a3423b7148cfaa5f0cedc8b8e8611c9e9beb7..ab0fe3d7c680ff48ed4f7dc0e0aff90454ea9fe1 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -23,6 +23,7 @@ #include #include #include +#include #ifdef __KERNEL__ #define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? 
\ @@ -94,16 +95,6 @@ extern void release_thread(struct task_struct *); unsigned long get_wchan(struct task_struct *p); -#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327) -#define cpu_relax() \ - do { \ - smp_mb(); \ - __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \ - } while (0) -#else -#define cpu_relax() barrier() -#endif - #define task_pt_regs(p) \ ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) diff --git a/arch/arm/include/asm/vdso/clocksource.h b/arch/arm/include/asm/vdso/clocksource.h new file mode 100644 index 0000000000000000000000000000000000000000..6daa9ba181b655bff305b8fd38674b2c3a61d0dd --- /dev/null +++ b/arch/arm/include/asm/vdso/clocksource.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSOCLOCKSOURCE_H +#define __ASM_VDSOCLOCKSOURCE_H + +struct arch_clocksource_data { + bool vdso_direct; /* Usable for direct VDSO access? */ +}; + +#endif /* __ASM_VDSOCLOCKSOURCE_H */ diff --git a/arch/arm/include/asm/vdso/cp15.h b/arch/arm/include/asm/vdso/cp15.h new file mode 100644 index 0000000000000000000000000000000000000000..bed16fa1865e9827077d4c5315d7114edfb37e49 --- /dev/null +++ b/arch/arm/include/asm/vdso/cp15.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 ARM Ltd. + */ +#ifndef __ASM_VDSO_CP15_H +#define __ASM_VDSO_CP15_H + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_CPU_CP15 + +#include + +#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \ + "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 +#define __ACCESS_CP15_64(Op1, CRm) \ + "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64 + +#define __read_sysreg(r, w, c, t) ({ \ + t __val; \ + asm volatile(r " " c : "=r" (__val)); \ + __val; \ +}) +#define read_sysreg(...) __read_sysreg(__VA_ARGS__) + +#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) +#define write_sysreg(v, ...) 
__write_sysreg(v, __VA_ARGS__) + +#define BPIALL __ACCESS_CP15(c7, 0, c5, 6) +#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0) + +#define CNTVCT __ACCESS_CP15_64(1, c14) + +#endif /* CONFIG_CPU_CP15 */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_VDSO_CP15_H */ diff --git a/arch/arm/include/asm/vdso/gettimeofday.h b/arch/arm/include/asm/vdso/gettimeofday.h new file mode 100644 index 0000000000000000000000000000000000000000..36dc18553ed8f88850991d76fab739b1f54d9526 --- /dev/null +++ b/arch/arm/include/asm/vdso/gettimeofday.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 ARM Limited + */ +#ifndef __ASM_VDSO_GETTIMEOFDAY_H +#define __ASM_VDSO_GETTIMEOFDAY_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +#define VDSO_HAS_CLOCK_GETRES 1 + +extern struct vdso_data *__get_datapage(void); + +static __always_inline int gettimeofday_fallback( + struct __kernel_old_timeval *_tv, + struct timezone *_tz) +{ + register struct timezone *tz asm("r1") = _tz; + register struct __kernel_old_timeval *tv asm("r0") = _tv; + register long ret asm ("r0"); + register long nr asm("r7") = __NR_gettimeofday; + + asm volatile( + " swi #0\n" + : "=r" (ret) + : "r" (tv), "r" (tz), "r" (nr) + : "memory"); + + return ret; +} + +static __always_inline long clock_gettime_fallback( + clockid_t _clkid, + struct __kernel_timespec *_ts) +{ + register struct __kernel_timespec *ts asm("r1") = _ts; + register clockid_t clkid asm("r0") = _clkid; + register long ret asm ("r0"); + register long nr asm("r7") = __NR_clock_gettime64; + + asm volatile( + " swi #0\n" + : "=r" (ret) + : "r" (clkid), "r" (ts), "r" (nr) + : "memory"); + + return ret; +} + +static __always_inline long clock_gettime32_fallback( + clockid_t _clkid, + struct old_timespec32 *_ts) +{ + register struct old_timespec32 *ts asm("r1") = _ts; + register clockid_t clkid asm("r0") = _clkid; + register long ret asm ("r0"); + register long nr asm("r7") = __NR_clock_gettime; + + asm volatile( + " swi #0\n" + : "=r" (ret) + : "r" (clkid), "r" (ts), "r" (nr) + : "memory"); + + return ret; +} + +static __always_inline int clock_getres_fallback( + clockid_t _clkid, + struct __kernel_timespec *_ts) +{ + register struct __kernel_timespec *ts asm("r1") = _ts; + register clockid_t clkid asm("r0") = _clkid; + register long ret asm ("r0"); + register long nr asm("r7") = __NR_clock_getres_time64; + + asm volatile( + " swi #0\n" + : "=r" (ret) + : "r" (clkid), "r" (ts), "r" (nr) + : "memory"); + + return ret; +} + +static __always_inline int clock_getres32_fallback( + clockid_t _clkid, + struct old_timespec32 *_ts) +{ + register struct old_timespec32 *ts asm("r1") = _ts; + register clockid_t clkid asm("r0") = _clkid; + register long ret asm ("r0"); + register long nr asm("r7") = __NR_clock_getres; + + asm volatile( + " swi #0\n" + : "=r" (ret) + : "r" (clkid), "r" (ts), "r" (nr) + : "memory"); + + return ret; +} + +static inline bool arm_vdso_hres_capable(void) +{ + return IS_ENABLED(CONFIG_ARM_ARCH_TIMER); +} +#define __arch_vdso_hres_capable arm_vdso_hres_capable + +static __always_inline u64 __arch_get_hw_counter(int clock_mode) +{ +#ifdef CONFIG_ARM_ARCH_TIMER + u64 cycle_now; + + /* + * Core checks for mode already, so this raced against a concurrent + * update. Return something. Core will do another round and then + * see the mode change and fallback to the syscall. + */ + if (clock_mode == VDSO_CLOCKMODE_NONE) + return 0; + + isb(); + cycle_now = read_sysreg(CNTVCT); + + return cycle_now; +#else + /* Make GCC happy. 
This is compiled out anyway */ + return 0; +#endif +} + +static __always_inline const struct vdso_data *__arch_get_vdso_data(void) +{ + return __get_datapage(); +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/arm/include/asm/vdso/processor.h b/arch/arm/include/asm/vdso/processor.h new file mode 100644 index 0000000000000000000000000000000000000000..45efb3ff511c9c64a7540ffe7163bdb07e316fdb --- /dev/null +++ b/arch/arm/include/asm/vdso/processor.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 ARM Ltd. + */ +#ifndef __ASM_VDSO_PROCESSOR_H +#define __ASM_VDSO_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327) +#define cpu_relax() \ + do { \ + smp_mb(); \ + __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \ + } while (0) +#else +#define cpu_relax() barrier() +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_VDSO_PROCESSOR_H */ diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index a17f9b250c4f8b667b6f6498b71e5e22733b4a20..3c75ba48bd5c2e6e6eb422ed2dfae997d4bc6dbb 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -116,6 +116,14 @@ EXPORT_SYMBOL(elf_hwcap2); char* (*arch_read_hardware_id)(void); EXPORT_SYMBOL(arch_read_hardware_id); +/* Vendor stub */ +unsigned int boot_reason; +EXPORT_SYMBOL_GPL(boot_reason); + +/* Vendor stub */ +unsigned int cold_boot; +EXPORT_SYMBOL_GPL(cold_boot); + #ifdef MULTI_CPU struct processor processor __ro_after_init; #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index e9cfe8e86f332c0e081bfb8002cd0c0e112b8197..02bf3eab4196c906e774553ad898ee8196df3449 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ -89,8 +89,10 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o endif +ifeq ($(CONFIG_ARM_CPU_SUSPEND),y) AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a obj-$(CONFIG_SOC_IMX6) += resume-imx6.o +endif obj-$(CONFIG_SOC_IMX6) += pm-imx6.o obj-$(CONFIG_SOC_IMX1) += mach-imx1.o diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 8c3556637d2528730b319fd3ce63872ee502a6dc..a24802bd3e372bfc0db163ce6bc51e5a33dd7ec8 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -378,7 +378,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, static void __dma_free_remap(void *cpu_addr, size_t size) { dma_common_free_remap(cpu_addr, size, - VM_ARM_DMA_CONSISTENT | VM_USERMAP); + VM_ARM_DMA_CONSISTENT | VM_USERMAP, false); } #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K @@ -1648,7 +1648,7 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) { dma_common_free_remap(cpu_addr, size, - VM_ARM_DMA_CONSISTENT | VM_USERMAP); + VM_ARM_DMA_CONSISTENT | VM_USERMAP, false); } __iommu_remove_mapping(dev, handle, size); diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 25b3ee85066e16e95652b9963645dab61bcb7bbd..328ced7bfaf26c31d2b6832e604f4cdc1a5a063e 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -930,7 +930,11 @@ static inline void emit_a32_rsh_i64(const s8 dst[], rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do LSR operation */ - if (val < 32) { + if (val == 0) { + /* An immediate value of 0 encodes a shift amount of 32 + 
* for LSR. To shift by 0, don't do anything. + */ + } else if (val < 32) { emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx); @@ -956,7 +960,11 @@ static inline void emit_a32_arsh_i64(const s8 dst[], rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Do ARSH operation */ - if (val < 32) { + if (val == 0) { + /* An immediate value of 0 encodes a shift amount of 32 + * for ASR. To shift by 0, don't do anything. + */ + } else if (val < 32) { emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx); @@ -993,21 +1001,35 @@ static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], arm_bpf_put_reg32(dst_hi, rd[0], ctx); } +static bool is_ldst_imm(s16 off, const u8 size) +{ + s16 off_max = 0; + + switch (size) { + case BPF_B: + case BPF_W: + off_max = 0xfff; + break; + case BPF_H: + off_max = 0xff; + break; + case BPF_DW: + /* Need to make sure off+4 does not overflow. */ + off_max = 0xfff - 4; + break; + } + return -off_max <= off && off <= off_max; +} + /* *(size *)(dst + off) = src */ static inline void emit_str_r(const s8 dst, const s8 src[], - s32 off, struct jit_ctx *ctx, const u8 sz){ + s16 off, struct jit_ctx *ctx, const u8 sz){ const s8 *tmp = bpf2a32[TMP_REG_1]; - s32 off_max; s8 rd; rd = arm_bpf_get_reg32(dst, tmp[1], ctx); - if (sz == BPF_H) - off_max = 0xff; - else - off_max = 0xfff; - - if (off < 0 || off > off_max) { + if (!is_ldst_imm(off, sz)) { emit_a32_mov_i(tmp[0], off, ctx); emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx); rd = tmp[0]; @@ -1036,18 +1058,12 @@ static inline void emit_str_r(const s8 dst, const s8 src[], /* dst = *(size*)(src + off) */ static inline void emit_ldx_r(const s8 dst[], const s8 src, - s32 off, struct jit_ctx *ctx, const u8 sz){ + s16 off, struct jit_ctx *ctx, const u8 sz){ const s8 *tmp = bpf2a32[TMP_REG_1]; const s8 *rd = is_stacked(dst_lo) ? tmp : dst; s8 rm = src; - s32 off_max; - - if (sz == BPF_H) - off_max = 0xff; - else - off_max = 0xfff; - if (off < 0 || off > off_max) { + if (!is_ldst_imm(off, sz)) { emit_a32_mov_i(tmp[0], off, ctx); emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx); rm = tmp[0]; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 105283b01a534cd07d35328109cb22ec568310b5..65e28de057fe24cea8b16a1686aa5529a4aaff3f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -100,6 +100,7 @@ config ARM64 select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select GENERIC_TIME_VSYSCALL + select GENERIC_GETTIMEOFDAY select HANDLE_DOMAIN_IRQ select HARDIRQS_SW_RESEND select HAVE_ACPI_APEI if (ACPI && EFI) @@ -138,6 +139,8 @@ config ARM64 select HAVE_GENERIC_DMA_COHERENT select HAVE_HW_BREAKPOINT if PERF_EVENTS select HAVE_IRQ_TIME_ACCOUNTING + select HAVE_KERNEL_GZIP + select HAVE_KERNEL_LZ4 select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP if NUMA select HAVE_NMI @@ -152,6 +155,7 @@ config ARM64 select HAVE_SYSCALL_TRACEPOINTS select HAVE_KPROBES select HAVE_KRETPROBES + select HAVE_GENERIC_VDSO select IOMMU_DMA if IOMMU_SUPPORT select IRQ_DOMAIN select IRQ_FORCED_THREADING @@ -503,6 +507,22 @@ config ARM64_ERRATUM_1463225 If unsure, say Y. +config ARM64_ERRATUM_1542419 + bool "Neoverse-N1: workaround mis-ordering of instruction fetches" + default y + help + This option adds a workaround for ARM Neoverse-N1 erratum + 1542419. 
+ + Affected Neoverse-N1 cores could execute a stale instruction when + modified by another CPU. The workaround depends on a firmware + counterpart. + + Workaround the issue by hiding the DIC feature from EL0. This + forces user-space to perform cache maintenance. + + If unsure, say Y. + config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y @@ -752,6 +772,18 @@ config HOTPLUG_CPU Say Y here to experiment with turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. +# The GPIO number here must be sorted by descending number. In case of +# a multiplatform kernel, we just want the highest value required by the +# selected platforms. +config ARCH_NR_GPIO + int "Number of GPIOs in the system" + default 1280 if ARCH_QCOM + default 256 + help + Maximum number of GPIOs in the system. + + If unsure, leave the default value. + # Common NUMA Features config NUMA bool "Numa Memory Allocation and Scheduler Support" @@ -830,6 +862,37 @@ config ARCH_HAS_CACHE_LINE_SIZE config CC_HAVE_SHADOW_CALL_STACK def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18) +config ARM64_DMA_USE_IOMMU + bool "ARM64 DMA iommu integration" + select ARM_HAS_SG_CHAIN + select NEED_SG_DMA_LENGTH + help + Enable using iommu through the standard dma apis. + dma_alloc_coherent() will allocate scatter-gather memory + which is made virtually contiguous via iommu. + Enable if system contains IOMMU hardware. + +if ARM64_DMA_USE_IOMMU + +config ARM64_DMA_IOMMU_ALIGNMENT + int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers" + range 4 9 + default 9 + help + DMA mapping framework by default aligns all buffers to the smallest + PAGE_SIZE order which is greater than or equal to the requested buffer + size. This works well for buffers up to a few hundreds kilobytes, but + for larger buffers it just a waste of address space. Drivers which has + relatively small addressing window (like 64Mib) might run out of + virtual space with just a few allocations. + + With this parameter you can specify the maximum PAGE_SIZE order for + DMA IOMMU buffers. Larger buffers will be aligned only to this + specified order. The order is expressed as a power of two multiplied + by the PAGE_SIZE. + +endif + config SECCOMP bool "Enable seccomp to safely compute untrusted bytecode" ---help--- @@ -981,6 +1044,19 @@ config ARM64_TAGGED_ADDR_ABI to system calls as pointer arguments. For details, see Documentation/arm64/tagged-address-abi.rst. +config COMPAT_VDSO + bool "Enable vDSO for 32-bit applications" + depends on !CPU_BIG_ENDIAN && "$(CROSS_COMPILE_COMPAT)" != "" + select GENERIC_COMPAT_VDSO + default y + help + Place in the process address space of 32-bit applications an + ELF shared object providing fast implementations of gettimeofday + and clock_gettime. + + You must have a 32-bit build of glibc 2.22 or later for programs + to seamlessly take advantage of this. + menuconfig ARMV8_DEPRECATED bool "Emulate deprecated/obsolete ARMv8 instructions" depends on COMPAT @@ -1358,6 +1434,34 @@ config COMPAT If you want to execute 32-bit userspace applications, say Y. +config KUSER_HELPERS + bool "Enable kuser helpers page for 32 bit applications." + depends on COMPAT + default y + help + Warning: disabling this option may break 32-bit user programs. + + Provide kuser helpers to compat tasks. The kernel provides + helper code to userspace in read only form at a fixed location + to allow userspace to be independent of the CPU type fitted to + the system. 
This permits binaries to be run on ARMv4 through + to ARMv8 without modification. + + See Documentation/arm/kernel_user_helpers.txt for details. + + However, the fixed address nature of these helpers can be used + by ROP (return orientated programming) authors when creating + exploits. + + If all of the binaries and libraries which run on your platform + are built specifically for your platform, and make no use of + these helpers, then you can turn this option off to hinder + such exploits. However, in that case, if a binary or library + relying on those helpers is run, it will not function correctly. + + Say N here only if you are absolutely certain that you do not + need these helpers; otherwise, the safe option is to say Y. + config SYSVIPC_COMPAT def_bool y depends on COMPAT && SYSVIPC diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 49c699c90b4345a8333edb02adb3fa6814fd61df..098c39e17582b71fc6828494d576329e12b76f8f 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -49,10 +49,10 @@ $(warning Detected assembler with broken .inst; disassembly will be unreliable) endif endif -KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) +KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) $(compat_vdso) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += $(call cc-disable-warning, psabi) -KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) +KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) $(compat_vdso) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) @@ -171,6 +171,9 @@ ifeq ($(KBUILD_EXTMOD),) prepare: vdso_prepare vdso_prepare: prepare0 $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h + $(if $(CONFIG_COMPAT_VDSO),$(Q)$(MAKE) \ + $(build)=arch/arm64/kernel/vdso32 \ + include/generated/vdso32-offsets.h) endif define archhelp diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi index bd43912696111be8cbcdcc5aceb2e7ee8704d09e..6e4e4590773899bf43aeadc8b8474624a6d960d9 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi @@ -70,8 +70,7 @@ }; pmu { - compatible = "arm,cortex-a53-pmu", - "arm,armv8-pmuv3"; + compatible = "arm,cortex-a53-pmu"; interrupts = , , , diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig index ce5c914e4773e934a3a806959180f0bcee773e88..bd76c6bf56781e618537bd16410f847ad4c2df19 100644 --- a/arch/arm64/configs/gki_defconfig +++ b/arch/arm64/configs/gki_defconfig @@ -2,6 +2,7 @@ CONFIG_AUDIT=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT=y +CONFIG_IRQ_TIME_ACCOUNTING=y CONFIG_TASKSTATS=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y @@ -14,12 +15,12 @@ CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_BLK_CGROUP=y CONFIG_CGROUP_SCHED=y +# CONFIG_FAIR_GROUP_SCHED is not set CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_BPF=y CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_SCHED_TUNE=y CONFIG_BLK_DEV_INITRD=y @@ -49,6 +50,7 @@ CONFIG_SCHED_MC=y CONFIG_NR_CPUS=32 CONFIG_SECCOMP=y CONFIG_PARAVIRT=y +# CONFIG_ARM64_TAGGED_ADDR_ABI is not set CONFIG_ARMV8_DEPRECATED=y CONFIG_SWP_EMULATION=y CONFIG_CP15_BARRIER_EMULATION=y @@ -64,6 +66,7 @@ CONFIG_ENERGY_MODEL=y CONFIG_CPU_IDLE=y CONFIG_ARM_CPUIDLE=y CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_TIMES=y CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y @@ -94,6 +97,7 @@ 
CONFIG_CLEANCACHE=y CONFIG_CMA=y CONFIG_CMA_AREAS=16 CONFIG_ZSMALLOC=y +CONFIG_MM_EVENT_STAT=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -106,7 +110,9 @@ CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_NET_IPIP=y CONFIG_NET_IPGRE_DEMUX=y +CONFIG_NET_IPGRE=y CONFIG_NET_IPVTI=y CONFIG_INET_ESP=y CONFIG_INET_UDP_DIAG=y @@ -118,6 +124,7 @@ CONFIG_INET6_ESP=y CONFIG_INET6_IPCOMP=y CONFIG_IPV6_MIP6=y CONFIG_IPV6_VTI=y +CONFIG_IPV6_GRE=y CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_NETFILTER=y @@ -209,6 +216,8 @@ CONFIG_CFG80211=y CONFIG_MAC80211=y CONFIG_RFKILL=y # CONFIG_UEVENT_HELPER is not set +CONFIG_FW_LOADER_USER_HELPER=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y # CONFIG_FW_CACHE is not set # CONFIG_ALLOW_DEV_COREDUMP is not set CONFIG_DMA_CMA=y @@ -221,8 +230,9 @@ CONFIG_UID_SYS_STATS=y CONFIG_SCSI=y # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=y -CONFIG_SCSI_UFSHCD=y -CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_UFSHCD=m +CONFIG_SCSI_UFSHCD_PLATFORM=m CONFIG_SCSI_UFS_CRYPTO=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y @@ -237,6 +247,7 @@ CONFIG_DM_BOW=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y CONFIG_TUN=y +CONFIG_VETH=y # CONFIG_ETHERNET is not set CONFIG_PHYLIB=y CONFIG_PPP=y @@ -288,31 +299,39 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_MSM_GENI_EARLY_CONSOLE=y CONFIG_SERIAL_DEV_BUS=y CONFIG_HW_RANDOM=y # CONFIG_HW_RANDOM_CAVIUM is not set # CONFIG_DEVPORT is not set # CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y # CONFIG_I2C_HELPER_AUTO is not set CONFIG_SPI=y CONFIG_SPMI=y +# CONFIG_SPMI_MSM_PMIC_ARB is not set CONFIG_PINCTRL_AMD=y +CONFIG_GPIO_SYSFS=y CONFIG_POWER_AVS=y CONFIG_POWER_RESET_HISI=y -# CONFIG_HWMON is not set CONFIG_THERMAL=y +CONFIG_THERMAL_STATISTICS=y +CONFIG_THERMAL_WRITABLE_TRIPS=y CONFIG_THERMAL_GOV_USER_SPACE=y CONFIG_CPU_THERMAL=y CONFIG_DEVFREQ_THERMAL=y +CONFIG_THERMAL_EMULATION=y CONFIG_WATCHDOG=y CONFIG_WATCHDOG_CORE=y CONFIG_MFD_ACT8945A=y CONFIG_MFD_SYSCON=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_PROXY_CONSUMER=y CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y # CONFIG_VGA_ARB is not set CONFIG_DRM=y # CONFIG_DRM_FBDEV_EMULATION is not set @@ -325,7 +344,6 @@ CONFIG_SND=y CONFIG_SND_HRTIMER=y CONFIG_SND_DYNAMIC_MINORS=y # CONFIG_SND_SUPPORT_OLD_API is not set -# CONFIG_SND_VERBOSE_PROCFS is not set # CONFIG_SND_DRIVERS is not set CONFIG_SND_USB_AUDIO=y CONFIG_SND_SOC=y @@ -354,14 +372,15 @@ CONFIG_TYPEC=y CONFIG_MMC=y # CONFIG_PWRSEQ_EMMC is not set # CONFIG_PWRSEQ_SIMPLE is not set +CONFIG_MMC_CRYPTO=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TRANSIENT=y CONFIG_EDAC=y CONFIG_RTC_CLASS=y -# CONFIG_RTC_SYSTOHC is not set CONFIG_RTC_DRV_PL030=y CONFIG_RTC_DRV_PL031=y CONFIG_DMADEVICES=y @@ -373,7 +392,10 @@ CONFIG_COMMON_CLK_SCPI=y # CONFIG_COMMON_CLK_XGENE is not set CONFIG_HWSPINLOCK=y CONFIG_MAILBOX=y -CONFIG_ARM_SMMU=y +CONFIG_IOMMU_IO_PGTABLE_FAST=y +CONFIG_ARM_SMMU=m +CONFIG_REMOTEPROC=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_PERFORMANCE=y CONFIG_DEVFREQ_GOV_POWERSAVE=y CONFIG_DEVFREQ_GOV_USERSPACE=y @@ -464,6 +486,8 @@ CONFIG_SECURITY=y CONFIG_SECURITYFS=y CONFIG_SECURITY_NETWORK=y CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +CONFIG_STATIC_USERMODEHELPER=y 
CONFIG_SECURITY_SELINUX=y CONFIG_INIT_STACK_ALL=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y @@ -487,5 +511,3 @@ CONFIG_PANIC_TIMEOUT=5 CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_PREEMPT is not set CONFIG_DEBUG_LIST=y -CONFIG_CORESIGHT=y -CONFIG_CORESIGHT_STM=y diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 88dab0d891258c5a9836345b0059912f89cd5295..147f3ebe28550d6546caa860eba704f5abf52241 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -22,6 +22,7 @@ #define CTR_L1IP_MASK 3 #define CTR_DMINLINE_SHIFT 16 #define CTR_IMINLINE_SHIFT 0 +#define CTR_IMINLINE_MASK 0xf #define CTR_ERG_SHIFT 20 #define CTR_CWG_SHIFT 24 #define CTR_CWG_MASK 15 @@ -29,7 +30,7 @@ #define CTR_DIC_SHIFT 29 #define CTR_CACHE_MINLINE_MASK \ - (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT) + (0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT) #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK) diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 93adc5f9e7e786c83f9031ad78ad84bad80634f8..2ac655f36a10482b7586e3bcc32f13e4e93fe3e2 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -131,6 +131,15 @@ static inline void flush_cache_range(struct vm_area_struct *vma, extern void __dma_map_area(const void *, size_t, int); extern void __dma_unmap_area(const void *, size_t, int); extern void __dma_flush_area(const void *, size_t); +extern void __dma_inv_area(const void *start, size_t size); +extern void __dma_clean_area(const void *start, size_t size); + +#define dmac_flush_range(start, end) \ + __dma_flush_area(start, (void *)(end) - (void *)(start)) +#define dmac_inv_range(start, end) \ + __dma_inv_area(start, (void *)(end) - (void *)(start)) +#define dmac_clean_range(start, end) \ + __dma_clean_area(start, (void *)(end) - (void *)(start)) /* * Copy user data from/to a page which is mapped into a different diff --git a/arch/arm64/include/asm/clocksource.h b/arch/arm64/include/asm/clocksource.h index 0ece64a26c8c94d04c7aac8a35b7bc8dd0000b85..482185566b0cfa3f2dceb8beb8a5f42e53f14fc9 100644 --- a/arch/arm64/include/asm/clocksource.h +++ b/arch/arm64/include/asm/clocksource.h @@ -2,8 +2,6 @@ #ifndef _ASM_CLOCKSOURCE_H #define _ASM_CLOCKSOURCE_H -struct arch_clocksource_data { - bool vdso_direct; /* Usable for direct VDSO access? 
*/ -}; +#include #endif diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 659e5a6257a70093d285bdc82242c3fa8b6472d8..0cfdf8d5f95c2596eaaaccb61451613b6ac3adc9 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -53,8 +53,9 @@ #define ARM64_HAS_STAGE2_FWB 32 #define ARM64_WORKAROUND_1463225 33 #define ARM64_SSBS 34 +#define ARM64_WORKAROUND_1542419 35 -/* kabi: reserve 35 - 62 for future cpu capabilities */ +/* kabi: reserve 36 - 62 for future cpu capabilities */ #define ARM64_NCAPS 62 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index fa770c070fddf3f46835fc392243b41d76d4e8ad..3cd936b1c79c1701a1c24ab924898ababf434b8b 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -80,6 +80,7 @@ #define ARM_CPU_PART_CORTEX_A35 0xD04 #define ARM_CPU_PART_CORTEX_A55 0xD05 #define ARM_CPU_PART_CORTEX_A76 0xD0B +#define ARM_CPU_PART_NEOVERSE_N1 0xD0C #define APM_CPU_PART_POTENZA 0x000 @@ -107,6 +108,7 @@ #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35) #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) +#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h index 5a5fa47a6b18bbd5b29f6dde5fb9201a524ddc2e..859a5ca54269c974f80beba728000a38c70cba24 100644 --- a/arch/arm64/include/asm/device.h +++ b/arch/arm64/include/asm/device.h @@ -24,9 +24,18 @@ struct dev_archdata { const struct dma_map_ops *dev_dma_ops; #endif bool dma_coherent; +#ifdef CONFIG_ARM64_DMA_USE_IOMMU + struct dma_iommu_mapping *mapping; +#endif }; struct pdev_archdata { }; +#ifdef CONFIG_ARM64_DMA_USE_IOMMU +#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping) +#else +#define to_dma_iommu_mapping(dev) NULL +#endif + #endif diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h new file mode 100644 index 0000000000000000000000000000000000000000..0896ee13c0b7b3371c053ea3631f2184a84ccfe3 --- /dev/null +++ b/arch/arm64/include/asm/dma-contiguous.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2013,2017-2018 The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _ASM_DMA_CONTIGUOUS_H +#define _ASM_DMA_CONTIGUOUS_H + +#ifdef __KERNEL__ +#ifdef CONFIG_DMA_CMA + +#include + +void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size); + +#endif +#endif + +#endif diff --git a/arch/arm64/include/asm/dma-iommu.h b/arch/arm64/include/asm/dma-iommu.h new file mode 100644 index 0000000000000000000000000000000000000000..80397db4b40e3cd3ae4828e74c56eb337f528880 --- /dev/null +++ b/arch/arm64/include/asm/dma-iommu.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef ASMARM_DMA_IOMMU_H +#define ASMARM_DMA_IOMMU_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include +#include + +struct dma_iommu_mapping { + /* iommu specific data */ + struct iommu_domain *domain; + bool init; + struct kref kref; + const struct dma_map_ops *ops; + + /* Protects bitmap */ + spinlock_t lock; + void *bitmap; + size_t bits; + dma_addr_t base; + + struct dma_fast_smmu_mapping *fast; +}; + +#ifdef CONFIG_ARM64_DMA_USE_IOMMU +void arm_iommu_put_dma_cookie(struct iommu_domain *domain); +#else /* !CONFIG_ARM64_DMA_USE_IOMMU */ +static inline void arm_iommu_put_dma_cookie(struct iommu_domain *domain) {} +#endif /* CONFIG_ARM64_DMA_USE_IOMMU */ + +#endif /* __KERNEL__ */ +#endif diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 433b9554c6a19c1e72b2be8f827861c14f5856fb..5f571b2e6c980345eda76e675ccc59b315089f57 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -209,11 +209,25 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; ({ \ set_thread_flag(TIF_32BIT); \ }) +#ifdef CONFIG_COMPAT_VDSO +#define COMPAT_ARCH_DLINFO \ +do { \ + /* \ + * Note that we use Elf64_Off instead of elf_addr_t because \ + * elf_addr_t in compat is defined as Elf32_Addr and casting \ + * current->mm->context.vdso to it triggers a cast warning of \ + * cast from pointer to integer of different size. \ + */ \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ + (Elf64_Off)current->mm->context.vdso); \ +} while (0) +#else #define COMPAT_ARCH_DLINFO -extern int aarch32_setup_vectors_page(struct linux_binprm *bprm, - int uses_interp); +#endif +extern int aarch32_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); #define compat_arch_setup_additional_pages \ - aarch32_setup_vectors_page + aarch32_setup_additional_pages #endif /* CONFIG_COMPAT */ diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index 8be3b0f8518280d383b4799917660991ec270ad0..90894d8e79604c071003091ad1636f8f40c6fa3a 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -21,7 +21,7 @@ #else /* __ASSEMBLER__ */ #ifdef CONFIG_LTO_CLANG -#define __LSE_PREAMBLE ".arch armv8-a+lse\n" +#define __LSE_PREAMBLE ".arch_extension lse\n" #else __asm__(".arch_extension lse"); #define __LSE_PREAMBLE diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index bfb6d67059610febc7abfee91b70812cf4680153..e9af2b4e70f7a58ae80da925b4c33a7a00626792 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -40,6 +40,8 @@ #include #include +#include + #include #include #include @@ -53,7 +55,7 @@ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. */ #ifdef CONFIG_COMPAT -#ifdef CONFIG_ARM64_64K_PAGES +#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS) /* * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied * by the compat vectors page. 
@@ -227,11 +229,6 @@ extern void release_thread(struct task_struct *); unsigned long get_wchan(struct task_struct *p); -static inline void cpu_relax(void) -{ - asm volatile("yield" ::: "memory"); -} - /* Thread switching */ extern struct task_struct *cpu_switch_to(struct task_struct *prev, struct task_struct *next); diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h index 81abea0b7650867d86385fc0c4fc145fefd24c46..1f05268f4c6d7b8768517d8687f9e753db43250c 100644 --- a/arch/arm64/include/asm/signal32.h +++ b/arch/arm64/include/asm/signal32.h @@ -20,7 +20,51 @@ #ifdef CONFIG_COMPAT #include -#define AARCH32_KERN_SIGRET_CODE_OFFSET 0x500 +struct compat_sigcontext { + /* We always set these two fields to 0 */ + compat_ulong_t trap_no; + compat_ulong_t error_code; + + compat_ulong_t oldmask; + compat_ulong_t arm_r0; + compat_ulong_t arm_r1; + compat_ulong_t arm_r2; + compat_ulong_t arm_r3; + compat_ulong_t arm_r4; + compat_ulong_t arm_r5; + compat_ulong_t arm_r6; + compat_ulong_t arm_r7; + compat_ulong_t arm_r8; + compat_ulong_t arm_r9; + compat_ulong_t arm_r10; + compat_ulong_t arm_fp; + compat_ulong_t arm_ip; + compat_ulong_t arm_sp; + compat_ulong_t arm_lr; + compat_ulong_t arm_pc; + compat_ulong_t arm_cpsr; + compat_ulong_t fault_address; +}; + +struct compat_ucontext { + compat_ulong_t uc_flags; + compat_uptr_t uc_link; + compat_stack_t uc_stack; + struct compat_sigcontext uc_mcontext; + compat_sigset_t uc_sigmask; + int __unused[32 - (sizeof(compat_sigset_t) / sizeof(int))]; + compat_ulong_t uc_regspace[128] __attribute__((__aligned__(8))); +}; + +struct compat_sigframe { + struct compat_ucontext uc; + compat_ulong_t retcode[2]; +}; + +struct compat_rt_sigframe { + struct compat_siginfo info; + struct compat_sigframe sig; +}; int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set, struct pt_regs *regs); diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 9d9461fb0b2b6132cb070d7e645647d61bfa045b..fbe0c5855b6017650a8b8fdabf5f362bc940544c 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -60,7 +60,9 @@ #ifndef CONFIG_BROKEN_GAS_INST #ifdef __ASSEMBLY__ -#define __emit_inst(x) .inst (x) +// The space separator is omitted so that __emit_inst(x) can be parsed as +// either an assembler directive or an assembler macro argument. +#define __emit_inst(x) .inst(x) #else #define __emit_inst(x) ".inst " __stringify((x)) "\n\t" #endif diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 47b822ec3c6ef8ca10eb2c74525a8144b29413a7..b4910b4e87045160afc3f360a85c871148fe909a 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -33,8 +33,13 @@ #define __NR_compat_exit 1 #define __NR_compat_read 3 #define __NR_compat_write 4 +#define __NR_compat_gettimeofday 78 #define __NR_compat_sigreturn 119 #define __NR_compat_rt_sigreturn 173 +#define __NR_compat_clock_gettime 263 +#define __NR_compat_clock_getres 264 +#define __NR_compat_clock_gettime64 403 +#define __NR_compat_clock_getres_time64 406 /* * The following SVCs are ARM private. 
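Note on the compat syscall numbers added above: the __NR_compat_gettimeofday, __NR_compat_clock_gettime and __NR_compat_clock_getres values are what the 32-bit vDSO fallback routines introduced later in this series (asm/vdso/compat_gettimeofday.h) load into r7 before issuing "swi #0" when the architected counter cannot be used. A rough userspace sanity check is sketched below; it is not part of the patch, it assumes an AArch32 cross toolchain and glibc 2.22 or newer, and the file and binary names are invented for the example. Running the binary under "strace -c" should show few or no clock_gettime syscalls once CONFIG_COMPAT_VDSO=y, since the vDSO services the hot loop in userspace.

    /* vdso_check.c - hypothetical test program, not part of this patch.
     * Build: arm-linux-gnueabihf-gcc -O2 vdso_check.c -o vdso_check
     */
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;
            long i;

            /* With a working compat vDSO these calls never enter the kernel. */
            for (i = 0; i < 10000000; i++)
                    clock_gettime(CLOCK_MONOTONIC, &ts);

            printf("last reading: %lld.%09ld\n",
                   (long long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }
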
diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h index 839ce0031bd58a076893e844aa12c9a8ed4d33aa..9b197e5ea7594fc590028c5e9142e6f42a6aa8be 100644 --- a/arch/arm64/include/asm/vdso.h +++ b/arch/arm64/include/asm/vdso.h @@ -28,6 +28,9 @@ #ifndef __ASSEMBLY__ #include +#ifdef CONFIG_COMPAT_VDSO +#include +#endif #define VDSO_SYMBOL(base, name) \ ({ \ diff --git a/arch/arm64/include/asm/vdso/clocksource.h b/arch/arm64/include/asm/vdso/clocksource.h new file mode 100644 index 0000000000000000000000000000000000000000..1a4776712b95f7f67a597d8961746a724689fff7 --- /dev/null +++ b/arch/arm64/include/asm/vdso/clocksource.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSOCLOCKSOURCE_H +#define __ASM_VDSOCLOCKSOURCE_H + +struct arch_clocksource_data { + bool vdso_direct; /* Usable for direct VDSO access? */ +}; + +#endif diff --git a/arch/arm64/include/asm/vdso/compat_barrier.h b/arch/arm64/include/asm/vdso/compat_barrier.h new file mode 100644 index 0000000000000000000000000000000000000000..d44313a5d0332a680810ebc170c0f0c9f4a9aaf5 --- /dev/null +++ b/arch/arm64/include/asm/vdso/compat_barrier.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 ARM Limited + */ +#ifndef __COMPAT_BARRIER_H +#define __COMPAT_BARRIER_H + +#ifndef __ASSEMBLY__ +/* + * Warning: This code is meant to be used with + * ENABLE_COMPAT_VDSO only. + */ +#ifndef ENABLE_COMPAT_VDSO +#error This header is meant to be used with ENABLE_COMPAT_VDSO only +#endif + +#ifdef dmb +#undef dmb +#endif + +#if __LINUX_ARM_ARCH__ >= 7 +#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory") +#elif __LINUX_ARM_ARCH__ == 6 +#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ + : : "r" (0) : "memory") +#else +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") +#endif + +#if __LINUX_ARM_ARCH__ >= 8 && defined(CONFIG_AS_DMB_ISHLD) +#define aarch32_smp_mb() dmb(ish) +#define aarch32_smp_rmb() dmb(ishld) +#define aarch32_smp_wmb() dmb(ishst) +#else +#define aarch32_smp_mb() dmb(ish) +#define aarch32_smp_rmb() aarch32_smp_mb() +#define aarch32_smp_wmb() dmb(ishst) +#endif + + +#undef smp_mb +#undef smp_rmb +#undef smp_wmb + +#define smp_mb() aarch32_smp_mb() +#define smp_rmb() aarch32_smp_rmb() +#define smp_wmb() aarch32_smp_wmb() + +#endif /* !__ASSEMBLY__ */ + +#endif /* __COMPAT_BARRIER_H */ diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h new file mode 100644 index 0000000000000000000000000000000000000000..a836e219d50aa6a71536b20ac307c198a43937db --- /dev/null +++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 ARM Limited + */ +#ifndef __ASM_VDSO_GETTIMEOFDAY_H +#define __ASM_VDSO_GETTIMEOFDAY_H + +#ifndef __ASSEMBLY__ + +#include +#include + +#include + +#define __VDSO_USE_SYSCALL ULLONG_MAX + +#define VDSO_HAS_CLOCK_GETRES 1 + +#define BUILD_VDSO32 1 + +static __always_inline +int gettimeofday_fallback(struct __kernel_old_timeval *_tv, + struct timezone *_tz) +{ + register struct timezone *tz asm("r1") = _tz; + register struct __kernel_old_timeval *tv asm("r0") = _tv; + register long ret asm ("r0"); + register long nr asm("r7") = __NR_compat_gettimeofday; + + asm volatile( + " swi #0\n" + : "=r" (ret) + : "r" (tv), "r" (tz), "r" (nr) + : "memory"); + + return ret; +} + +static __always_inline +long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ 
+ register struct __kernel_timespec *ts asm("r1") = _ts; + register clockid_t clkid asm("r0") = _clkid; + register long ret asm ("r0"); + register long nr asm("r7") = __NR_compat_clock_gettime; + + asm volatile( + " swi #0\n" + : "=r" (ret) + : "r" (clkid), "r" (ts), "r" (nr) + : "memory"); + + return ret; +} + +static __always_inline +int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + register struct __kernel_timespec *ts asm("r1") = _ts; + register clockid_t clkid asm("r0") = _clkid; + register long ret asm ("r0"); + register long nr asm("r7") = __NR_compat_clock_getres; + + asm volatile( + " swi #0\n" + : "=r" (ret) + : "r" (clkid), "r" (ts), "r" (nr) + : "memory"); + + return ret; +} + +static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) +{ + u64 res; + + /* + * clock_mode == 0 implies that vDSO are enabled otherwise + * fallback on syscall. + */ + if (clock_mode) + return __VDSO_USE_SYSCALL; + + /* + * This isb() is required to prevent that the counter value + * is speculated. + */ + isb(); + asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (res)); + /* + * This isb() is required to prevent that the seq lock is + * speculated. + */ + isb(); + + return res; +} + +static __always_inline const struct vdso_data *__arch_get_vdso_data(void) +{ + const struct vdso_data *ret; + + /* + * This simply puts &_vdso_data into ret. The reason why we don't use + * `ret = _vdso_data` is that the compiler tends to optimise this in a + * very suboptimal way: instead of keeping &_vdso_data in a register, + * it goes through a relocation almost every time _vdso_data must be + * accessed (even in subfunctions). This is both time and space + * consuming: each relocation uses a word in the code section, and it + * has to be loaded at runtime. + * + * This trick hides the assignment from the compiler. Since it cannot + * track where the pointer comes from, it will only use one relocation + * where __arch_get_vdso_data() is called, and then keep the result in + * a register. 
+ */ + asm volatile("mov %0, %1" : "=r"(ret) : "r"(_vdso_data)); + + return ret; +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h new file mode 100644 index 0000000000000000000000000000000000000000..cc3456416096133526e735eb51f5fffca00005f1 --- /dev/null +++ b/arch/arm64/include/asm/vdso/gettimeofday.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 ARM Limited + */ +#ifndef __ASM_VDSO_GETTIMEOFDAY_H +#define __ASM_VDSO_GETTIMEOFDAY_H + +#ifndef __ASSEMBLY__ + +#include + +#define __VDSO_USE_SYSCALL ULLONG_MAX + +#define VDSO_HAS_CLOCK_GETRES 1 + +static __always_inline +int gettimeofday_fallback(struct __kernel_old_timeval *_tv, + struct timezone *_tz) +{ + register struct timezone *tz asm("x1") = _tz; + register struct __kernel_old_timeval *tv asm("x0") = _tv; + register long ret asm ("x0"); + register long nr asm("x8") = __NR_gettimeofday; + + asm volatile( + " svc #0\n" + : "=r" (ret) + : "r" (tv), "r" (tz), "r" (nr) + : "memory"); + + return ret; +} + +static __always_inline +long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + register struct __kernel_timespec *ts asm("x1") = _ts; + register clockid_t clkid asm("x0") = _clkid; + register long ret asm ("x0"); + register long nr asm("x8") = __NR_clock_gettime; + + asm volatile( + " svc #0\n" + : "=r" (ret) + : "r" (clkid), "r" (ts), "r" (nr) + : "memory"); + + return ret; +} + +static __always_inline +int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + register struct __kernel_timespec *ts asm("x1") = _ts; + register clockid_t clkid asm("x0") = _clkid; + register long ret asm ("x0"); + register long nr asm("x8") = __NR_clock_getres; + + asm volatile( + " svc #0\n" + : "=r" (ret) + : "r" (clkid), "r" (ts), "r" (nr) + : "memory"); + + return ret; +} + +static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) +{ + u64 res; + + /* + * clock_mode == 0 implies that vDSO are enabled otherwise + * fallback on syscall. + */ + if (clock_mode) + return __VDSO_USE_SYSCALL; + + /* + * This isb() is required to prevent that the counter value + * is speculated. + */ + isb(); + asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory"); + /* + * This isb() is required to prevent that the seq lock is + * speculated.# + */ + isb(); + + return res; +} + +static __always_inline +const struct vdso_data *__arch_get_vdso_data(void) +{ + return _vdso_data; +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/arm64/include/asm/vdso/processor.h b/arch/arm64/include/asm/vdso/processor.h new file mode 100644 index 0000000000000000000000000000000000000000..ff830b766ad2d9227cd8fbe2e59f91bba918c4a3 --- /dev/null +++ b/arch/arm64/include/asm/vdso/processor.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 ARM Ltd. 
+ */ +#ifndef __ASM_VDSO_PROCESSOR_H +#define __ASM_VDSO_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +static inline void cpu_relax(void) +{ + asm volatile("yield" ::: "memory"); +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_VDSO_PROCESSOR_H */ diff --git a/arch/arm64/include/asm/vdso/vsyscall.h b/arch/arm64/include/asm/vdso/vsyscall.h new file mode 100644 index 0000000000000000000000000000000000000000..0c731bfc7c8c763a1644c98d291be3454b63f760 --- /dev/null +++ b/arch/arm64/include/asm/vdso/vsyscall.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSO_VSYSCALL_H +#define __ASM_VDSO_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#include +#include + +#define VDSO_PRECISION_MASK ~(0xFF00ULL<<48) + +extern struct vdso_data *vdso_data; + +/* + * Update the vDSO data page to keep in sync with kernel timekeeping. + */ +static __always_inline +struct vdso_data *__arm64_get_k_vdso_data(void) +{ + return vdso_data; +} +#define __arch_get_k_vdso_data __arm64_get_k_vdso_data + +static __always_inline +int __arm64_get_clock_mode(struct timekeeper *tk) +{ + u32 use_syscall = !tk->tkr_mono.clock->archdata.vdso_direct; + + return use_syscall; +} +#define __arch_get_clock_mode __arm64_get_clock_mode + +static __always_inline +int __arm64_use_vsyscall(struct vdso_data *vdata) +{ + return !vdata[CS_HRES_COARSE].clock_mode; +} +#define __arch_use_vsyscall __arm64_use_vsyscall + +static __always_inline +void __arm64_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk) +{ + vdata[CS_HRES_COARSE].mask = VDSO_PRECISION_MASK; + vdata[CS_RAW].mask = VDSO_PRECISION_MASK; +} +#define __arch_update_vsyscall __arm64_update_vsyscall + +/* The asm-generic header needs to be included after the definitions above */ +#include + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_VSYSCALL_H */ diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h deleted file mode 100644 index f89263c8e11affe95f628b848dd344a766860b9c..0000000000000000000000000000000000000000 --- a/arch/arm64/include/asm/vdso_datapage.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (C) 2012 ARM Limited - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -#ifndef __ASM_VDSO_DATAPAGE_H -#define __ASM_VDSO_DATAPAGE_H - -#ifdef __KERNEL__ - -#ifndef __ASSEMBLY__ - -struct vdso_data { - __u64 cs_cycle_last; /* Timebase at clocksource init */ - __u64 raw_time_sec; /* Raw time */ - __u64 raw_time_nsec; - __u64 xtime_clock_sec; /* Kernel time */ - __u64 xtime_clock_nsec; - __u64 xtime_coarse_sec; /* Coarse time */ - __u64 xtime_coarse_nsec; - __u64 wtm_clock_sec; /* Wall to monotonic time */ - __u64 wtm_clock_nsec; - __u32 tb_seq_count; /* Timebase sequence counter */ - /* cs_* members must be adjacent and in this order (ldp accesses) */ - __u32 cs_mono_mult; /* NTP-adjusted clocksource multiplier */ - __u32 cs_shift; /* Clocksource shift (mono = raw) */ - __u32 cs_raw_mult; /* Raw clocksource multiplier */ - __u32 tz_minuteswest; /* Whacky timezone stuff */ - __u32 tz_dsttime; - __u32 use_syscall; - __u32 hrtimer_res; -}; - -#endif /* !__ASSEMBLY__ */ - -#endif /* __KERNEL__ */ - -#endif /* __ASM_VDSO_DATAPAGE_H */ diff --git a/arch/arm64/include/uapi/asm/setup.h b/arch/arm64/include/uapi/asm/setup.h index 5d703888f35110d2a7a15ec1836bffcffae3c8bb..85e34aa784e0ca6efc419a8bdb971fa9180a5a1b 100644 --- a/arch/arm64/include/uapi/asm/setup.h +++ b/arch/arm64/include/uapi/asm/setup.h @@ -22,6 +22,6 @@ #include -#define COMMAND_LINE_SIZE 2048 +#define COMMAND_LINE_SIZE 4096 #endif diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 17408dc0d8f34907ade76bb3655f370f2329ea27..1910098eb9f8194e7edeb2e30bdc9ad24ca4165c 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -27,8 +27,12 @@ OBJCOPYFLAGS := --prefix-symbols=__efistub_ $(obj)/%.stub.o: $(obj)/%.o FORCE $(call if_changed,objcopy) -arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ +arm64-obj-$(CONFIG_COMPAT) += sys32.o signal32.o \ sys_compat.o +ifneq ($(CONFIG_COMPAT_VDSO), y) +arm64-obj-$(CONFIG_COMPAT) += sigreturn32.o +endif +arm64-obj-$(CONFIG_KUSER_HELPERS) += kuser32.o arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o @@ -61,6 +65,7 @@ arm64-obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o obj-y += $(arm64-obj-y) vdso/ probes/ obj-m += $(arm64-obj-m) +obj-$(CONFIG_COMPAT_VDSO) += vdso32/ head-y := head.o extra-y += $(head-y) vmlinux.lds diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index 72f63a59b0088ac0f825b815ea692e44ad05004f..57c97728a8bf511535fb157f152434d7caf71444 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -29,6 +29,7 @@ #include #include +#include #include EXPORT_SYMBOL(copy_page); @@ -86,3 +87,15 @@ extern long long __ashrti3(long long a, int b); EXPORT_SYMBOL(__ashrti3); extern long long __lshrti3(long long a, int b); EXPORT_SYMBOL(__lshrti3); + + /* caching functions */ +EXPORT_SYMBOL_GPL(__dma_inv_area); +EXPORT_SYMBOL_GPL(__dma_clean_area); +EXPORT_SYMBOL_GPL(__dma_flush_area); +EXPORT_SYMBOL_GPL(__flush_dcache_area); + +EXPORT_SYMBOL_GPL(__bss_stop); +EXPORT_SYMBOL_GPL(__per_cpu_start); +EXPORT_SYMBOL_GPL(__per_cpu_end); +EXPORT_SYMBOL_GPL(_sdata); +EXPORT_SYMBOL_GPL(cpu_do_idle); diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index 39dc98dd78ebf9fbf081f550e8e3f1eaeb221d76..181c29af561793317960609399ff7506ab5d90a4 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -604,7 +604,7 @@ static struct undef_hook setend_hooks[] = { }, { /* Thumb mode */ - 
.instr_mask = 0x0000fff7, + .instr_mask = 0xfffffff7, .instr_val = 0x0000b650, .pstate_mask = (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK), .pstate_val = (PSR_AA32_T_BIT | PSR_AA32_MODE_USR), diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 87c3d4981feb39d74da7a42910965aac8877d703..b8f83cfe43f3cd418721339de740cc788c1eaea8 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -25,13 +25,14 @@ #include #include #include +#include #include #include #include #include +#include #include #include -#include #include #include @@ -84,6 +85,11 @@ int main(void) DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe)); DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); BLANK(); +#ifdef CONFIG_COMPAT + DEFINE(COMPAT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0)); + DEFINE(COMPAT_RT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_rt_sigframe, sig.uc.uc_mcontext.arm_r0)); + BLANK(); +#endif DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter)); BLANK(); DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); @@ -108,22 +114,28 @@ int main(void) DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC); DEFINE(NSEC_PER_SEC, NSEC_PER_SEC); BLANK(); - DEFINE(VDSO_CS_CYCLE_LAST, offsetof(struct vdso_data, cs_cycle_last)); - DEFINE(VDSO_RAW_TIME_SEC, offsetof(struct vdso_data, raw_time_sec)); - DEFINE(VDSO_RAW_TIME_NSEC, offsetof(struct vdso_data, raw_time_nsec)); - DEFINE(VDSO_XTIME_CLK_SEC, offsetof(struct vdso_data, xtime_clock_sec)); - DEFINE(VDSO_XTIME_CLK_NSEC, offsetof(struct vdso_data, xtime_clock_nsec)); - DEFINE(VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec)); - DEFINE(VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec)); - DEFINE(VDSO_WTM_CLK_SEC, offsetof(struct vdso_data, wtm_clock_sec)); - DEFINE(VDSO_WTM_CLK_NSEC, offsetof(struct vdso_data, wtm_clock_nsec)); - DEFINE(VDSO_TB_SEQ_COUNT, offsetof(struct vdso_data, tb_seq_count)); - DEFINE(VDSO_CS_MONO_MULT, offsetof(struct vdso_data, cs_mono_mult)); - DEFINE(VDSO_CS_RAW_MULT, offsetof(struct vdso_data, cs_raw_mult)); - DEFINE(VDSO_CS_SHIFT, offsetof(struct vdso_data, cs_shift)); + DEFINE(VDSO_SEQ, offsetof(struct vdso_data, seq)); + DEFINE(VDSO_CLK_MODE, offsetof(struct vdso_data, clock_mode)); + DEFINE(VDSO_CYCLE_LAST, offsetof(struct vdso_data, cycle_last)); + DEFINE(VDSO_MASK, offsetof(struct vdso_data, mask)); + DEFINE(VDSO_MULT, offsetof(struct vdso_data, mult)); + DEFINE(VDSO_SHIFT, offsetof(struct vdso_data, shift)); + DEFINE(VDSO_REALTIME_SEC, offsetof(struct vdso_data, basetime[CLOCK_REALTIME].sec)); + DEFINE(VDSO_REALTIME_NSEC, offsetof(struct vdso_data, basetime[CLOCK_REALTIME].nsec)); + DEFINE(VDSO_MONO_SEC, offsetof(struct vdso_data, basetime[CLOCK_MONOTONIC].sec)); + DEFINE(VDSO_MONO_NSEC, offsetof(struct vdso_data, basetime[CLOCK_MONOTONIC].nsec)); + DEFINE(VDSO_MONO_RAW_SEC, offsetof(struct vdso_data, basetime[CLOCK_MONOTONIC_RAW].sec)); + DEFINE(VDSO_MONO_RAW_NSEC, offsetof(struct vdso_data, basetime[CLOCK_MONOTONIC_RAW].nsec)); + DEFINE(VDSO_BOOTTIME_SEC, offsetof(struct vdso_data, basetime[CLOCK_BOOTTIME].sec)); + DEFINE(VDSO_BOOTTIME_NSEC, offsetof(struct vdso_data, basetime[CLOCK_BOOTTIME].nsec)); + DEFINE(VDSO_TAI_SEC, offsetof(struct vdso_data, basetime[CLOCK_TAI].sec)); + DEFINE(VDSO_TAI_NSEC, offsetof(struct vdso_data, basetime[CLOCK_TAI].nsec)); + DEFINE(VDSO_RT_COARSE_SEC, offsetof(struct vdso_data, basetime[CLOCK_REALTIME_COARSE].sec)); + DEFINE(VDSO_RT_COARSE_NSEC, offsetof(struct vdso_data, 
basetime[CLOCK_REALTIME_COARSE].nsec)); + DEFINE(VDSO_MONO_COARSE_SEC, offsetof(struct vdso_data, basetime[CLOCK_MONOTONIC_COARSE].sec)); + DEFINE(VDSO_MONO_COARSE_NSEC, offsetof(struct vdso_data, basetime[CLOCK_MONOTONIC_COARSE].nsec)); DEFINE(VDSO_TZ_MINWEST, offsetof(struct vdso_data, tz_minuteswest)); DEFINE(VDSO_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime)); - DEFINE(VDSO_USE_SYSCALL, offsetof(struct vdso_data, use_syscall)); BLANK(); DEFINE(TVAL_TV_SEC, offsetof(struct timeval, tv_sec)); DEFINE(TVAL_TV_USEC, offsetof(struct timeval, tv_usec)); diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 71888808ded72ade5574fd067b991e40a93e6cc7..76490b0cefceef544de124e3b7260952df04e703 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -643,6 +643,18 @@ needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry, return false; } +static bool __maybe_unused +has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry, + int scope) +{ + u32 midr = read_cpuid_id(); + bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT); + const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1); + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + return is_midr_in_range(midr, &range) && has_dic; +} + #ifdef CONFIG_HARDEN_EL2_VECTORS static const struct midr_range arm64_harden_el2_vectors[] = { @@ -834,6 +846,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), .matches = needs_tx2_tvm_workaround, }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1542419 + { + /* we depend on the firmware portion for correctness */ + .desc = "ARM erratum 1542419 (kernel portion)", + .capability = ARM64_WORKAROUND_1542419, + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = has_neoverse_n1_erratum_1542419, + .cpu_enable = cpu_enable_trap_ctr_access, + }, #endif { } diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index f2d13810daa8fc28449be12138e398a14275fc9a..e77407778a9ea8922cb2f3f5ef82bb3da9843e36 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -42,6 +42,7 @@ int arm_cpuidle_suspend(int index) return cpu_ops[cpu]->cpu_suspend(index); } +EXPORT_SYMBOL_GPL(arm_cpuidle_suspend); #ifdef CONFIG_ACPI diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index fac664c0b5974e1c8ed38bdcf351fdf36a10ab89..416c9ac8897595e9fe7c44eb26620b6122279c5a 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -674,7 +674,7 @@ ENTRY(__boot_cpu_mode) * with MMU turned off. */ ENTRY(__early_cpu_boot_status) - .long 0 + .quad 0 .popsection diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S index 997e6b27ff6a41a424cec2503bae44807f576888..49825e9e421e0d9aa8df026cbc46c0308f1d3928 100644 --- a/arch/arm64/kernel/kuser32.S +++ b/arch/arm64/kernel/kuser32.S @@ -1,29 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* - * Low-level user helpers placed in the vectors page for AArch32. + * AArch32 user helpers. * Based on the kuser helpers in arch/arm/kernel/entry-armv.S. * * Copyright (C) 2005-2011 Nicolas Pitre - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * Copyright (C) 2012-2018 ARM Ltd. * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * - * AArch32 user helpers. - * - * Each segment is 32-byte aligned and will be moved to the top of the high - * vector page. New segments (if ever needed) must be added in front of - * existing ones. This mechanism should be used only for things that are - * really small and justified, and not be abused freely. + * The kuser helpers below are mapped at a fixed address by + * aarch32_setup_additional_pages() and are provided for compatibility + * reasons with 32 bit (aarch32) applications that need them. * * See Documentation/arm/kernel_user_helpers.txt for formal definitions. */ @@ -77,42 +62,3 @@ __kuser_helper_version: // 0xffff0ffc .word ((__kuser_helper_end - __kuser_helper_start) >> 5) .globl __kuser_helper_end __kuser_helper_end: - -/* - * AArch32 sigreturn code - * - * For ARM syscalls, the syscall number has to be loaded into r7. - * We do not support an OABI userspace. - * - * For Thumb syscalls, we also pass the syscall number via r7. We therefore - * need two 16-bit instructions. - */ - .globl __aarch32_sigret_code_start -__aarch32_sigret_code_start: - - /* - * ARM Code - */ - .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn - .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn - - /* - * Thumb code - */ - .byte __NR_compat_sigreturn, 0x27 // svc #__NR_compat_sigreturn - .byte __NR_compat_sigreturn, 0xdf // mov r7, #__NR_compat_sigreturn - - /* - * ARM code - */ - .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn - .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn - - /* - * Thumb code - */ - .byte __NR_compat_rt_sigreturn, 0x27 // svc #__NR_compat_rt_sigreturn - .byte __NR_compat_rt_sigreturn, 0xdf // mov r7, #__NR_compat_rt_sigreturn - - .globl __aarch32_sigret_code_end -__aarch32_sigret_code_end: diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 74a25b4b8e87ec1e4cbf07ed185baf1f14e66403..980d9ea8bd4584352d0b8348ed23fe2aa84db83c 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -75,6 +75,7 @@ void (*pm_power_off)(void); EXPORT_SYMBOL_GPL(pm_power_off); void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); +EXPORT_SYMBOL_GPL(arm_pm_restart); /* * This is our default idle handler. 
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 53feccf975df515e3aeb9536fbf3ef3a0464fb47..f654b4454b4da1d7ecb4daa09632f9b649089cea 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -69,6 +69,14 @@ static struct resource *standard_resources; phys_addr_t __fdt_pointer __initdata; +/* Vendor stub */ +unsigned int boot_reason; +EXPORT_SYMBOL_GPL(boot_reason); + +/* Vendor stub */ +unsigned int cold_boot; +EXPORT_SYMBOL_GPL(cold_boot); + /* * Standard memory resources */ diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 24b09003f8214ce0df5a222a112e6cf10d2161e9..f2ac273f775a6f8fd0f01996e820b6b61328baea 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -29,42 +29,7 @@ #include #include #include - -struct compat_sigcontext { - /* We always set these two fields to 0 */ - compat_ulong_t trap_no; - compat_ulong_t error_code; - - compat_ulong_t oldmask; - compat_ulong_t arm_r0; - compat_ulong_t arm_r1; - compat_ulong_t arm_r2; - compat_ulong_t arm_r3; - compat_ulong_t arm_r4; - compat_ulong_t arm_r5; - compat_ulong_t arm_r6; - compat_ulong_t arm_r7; - compat_ulong_t arm_r8; - compat_ulong_t arm_r9; - compat_ulong_t arm_r10; - compat_ulong_t arm_fp; - compat_ulong_t arm_ip; - compat_ulong_t arm_sp; - compat_ulong_t arm_lr; - compat_ulong_t arm_pc; - compat_ulong_t arm_cpsr; - compat_ulong_t fault_address; -}; - -struct compat_ucontext { - compat_ulong_t uc_flags; - compat_uptr_t uc_link; - compat_stack_t uc_stack; - struct compat_sigcontext uc_mcontext; - compat_sigset_t uc_sigmask; - int __unused[32 - (sizeof (compat_sigset_t) / sizeof (int))]; - compat_ulong_t uc_regspace[128] __attribute__((__aligned__(8))); -}; +#include struct compat_vfp_sigframe { compat_ulong_t magic; @@ -92,16 +57,6 @@ struct compat_aux_sigframe { unsigned long end_magic; } __attribute__((__aligned__(8))); -struct compat_sigframe { - struct compat_ucontext uc; - compat_ulong_t retcode[2]; -}; - -struct compat_rt_sigframe { - struct compat_siginfo info; - struct compat_sigframe sig; -}; - #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set) @@ -398,14 +353,38 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, retcode = ptr_to_compat(ka->sa.sa_restorer); } else { /* Set up sigreturn pointer */ +#ifdef CONFIG_COMPAT_VDSO + void *vdso_base = current->mm->context.vdso; + void *vdso_trampoline; + + if (ka->sa.sa_flags & SA_SIGINFO) { + if (thumb) { + vdso_trampoline = VDSO_SYMBOL(vdso_base, + compat_rt_sigreturn_thumb); + } else { + vdso_trampoline = VDSO_SYMBOL(vdso_base, + compat_rt_sigreturn_arm); + } + } else { + if (thumb) { + vdso_trampoline = VDSO_SYMBOL(vdso_base, + compat_sigreturn_thumb); + } else { + vdso_trampoline = VDSO_SYMBOL(vdso_base, + compat_sigreturn_arm); + } + } + + retcode = ptr_to_compat(vdso_trampoline) + thumb; +#else unsigned int idx = thumb << 1; if (ka->sa.sa_flags & SA_SIGINFO) idx += 3; - retcode = AARCH32_VECTORS_BASE + - AARCH32_KERN_SIGRET_CODE_OFFSET + + retcode = (unsigned long)current->mm->context.vdso + (idx << 2) + thumb; +#endif } regs->regs[0] = usig; diff --git a/arch/arm64/kernel/sigreturn32.S b/arch/arm64/kernel/sigreturn32.S new file mode 100644 index 0000000000000000000000000000000000000000..475d30d471ac1634364bab74e7f3d58c0dfc1fb6 --- /dev/null +++ b/arch/arm64/kernel/sigreturn32.S @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * AArch32 sigreturn code. 
+ * Based on the kuser helpers in arch/arm/kernel/entry-armv.S. + * + * Copyright (C) 2005-2011 Nicolas Pitre + * Copyright (C) 2012-2018 ARM Ltd. + * + * For ARM syscalls, the syscall number has to be loaded into r7. + * We do not support an OABI userspace. + * + * For Thumb syscalls, we also pass the syscall number via r7. We therefore + * need two 16-bit instructions. + */ + +#include + + .globl __aarch32_sigret_code_start +__aarch32_sigret_code_start: + + /* + * ARM Code + */ + .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn + .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn + + /* + * Thumb code + */ + .byte __NR_compat_sigreturn, 0x27 // svc #__NR_compat_sigreturn + .byte __NR_compat_sigreturn, 0xdf // mov r7, #__NR_compat_sigreturn + + /* + * ARM code + */ + .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn + .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn + + /* + * Thumb code + */ + .byte __NR_compat_rt_sigreturn, 0x27 // svc #__NR_compat_rt_sigreturn + .byte __NR_compat_rt_sigreturn, 0xdf // mov r7, #__NR_compat_rt_sigreturn + + .globl __aarch32_sigret_code_end +__aarch32_sigret_code_end: diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 7f26618580820e8e4ba285cd312f4c0f78911cf3..ae1e1d23d9ed9579d868cf84593a5390c51fda45 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -597,6 +597,9 @@ static void __init acpi_parse_and_init_cpus(void) #else #define acpi_parse_and_init_cpus(...) do { } while (0) #endif +/* Dummy vendor field */ +DEFINE_PER_CPU(bool, pending_ipi); +EXPORT_SYMBOL_GPL(pending_ipi); static void (*__smp_update_ipi_history_cb)(int cpu); /* diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index 010212d35700eedce16a0602756b15fd141ff3eb..3ef9d0a3ac1dce836ffe3510ac41ed10a3d714eb 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -19,6 +19,7 @@ */ #include +#include #include #include #include @@ -28,6 +29,7 @@ #include #include +#include #include static long @@ -41,6 +43,15 @@ __do_compat_cache_op(unsigned long start, unsigned long end) if (fatal_signal_pending(current)) return 0; + if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) { + /* + * The workaround requires an inner-shareable tlbi. + * We pick the reserved-ASID to minimise the impact. + */ + __tlbi(aside1is, __TLBI_VADDR(0, 0)); + dsb(ish); + } + ret = __flush_cache_user_range(start, start + chunk); if (ret) return ret; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 178b9017111258228ef9c27d7f6aa5e7330bfe57..1592584f5622b6e82a610081e00f1d9a78bb4b41 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -482,6 +482,15 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0); + if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) { + /* Hide DIC so that we can trap the unnecessary maintenance...*/ + val &= ~BIT(CTR_DIC_SHIFT); + + /* ... and fake IminLine to reduce the number of traps. 
*/ + val &= ~CTR_IMINLINE_MASK; + val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK; + } + pt_regs_write_reg(regs, rt, val); arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 42b7082029e1d5003e509ffdbf4d916cbeb43dc7..ad3a81b2c7cec14f7afa3e923e3ac27a6969266c 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -1,5 +1,5 @@ /* - * VDSO implementation for AArch64 and vector page setup for AArch32. + * VDSO implementations. * * Copyright (C) 2012 ARM Limited * @@ -31,90 +31,74 @@ #include #include #include +#include +#include +#include #include #include #include -#include extern char vdso_start[], vdso_end[]; -static unsigned long vdso_pages __ro_after_init; +#ifdef CONFIG_COMPAT_VDSO +extern char vdso32_start[], vdso32_end[]; +#endif /* CONFIG_COMPAT_VDSO */ + +/* vdso_lookup arch_index */ +enum arch_vdso_type { + ARM64_VDSO = 0, +#ifdef CONFIG_COMPAT_VDSO + ARM64_VDSO32 = 1, +#endif /* CONFIG_COMPAT_VDSO */ +}; +#ifdef CONFIG_COMPAT_VDSO +#define VDSO_TYPES (ARM64_VDSO32 + 1) +#else +#define VDSO_TYPES (ARM64_VDSO + 1) +#endif /* CONFIG_COMPAT_VDSO */ + +struct __vdso_abi { + const char *name; + const char *vdso_code_start; + const char *vdso_code_end; + unsigned long vdso_pages; + /* Data Mapping */ + struct vm_special_mapping *dm; + /* Code Mapping */ + struct vm_special_mapping *cm; +}; + +static struct __vdso_abi vdso_lookup[VDSO_TYPES] __ro_after_init = { + { + .name = "vdso", + .vdso_code_start = vdso_start, + .vdso_code_end = vdso_end, + }, +#ifdef CONFIG_COMPAT_VDSO + { + .name = "vdso32", + .vdso_code_start = vdso32_start, + .vdso_code_end = vdso32_end, + }, +#endif /* CONFIG_COMPAT_VDSO */ +}; /* * The vDSO data page. */ static union { - struct vdso_data data; + struct vdso_data data[CS_BASES]; u8 page[PAGE_SIZE]; } vdso_data_store __page_aligned_data; -struct vdso_data *vdso_data = &vdso_data_store.data; - -#ifdef CONFIG_COMPAT -/* - * Create and map the vectors page for AArch32 tasks. - */ -static struct page *vectors_page[1] __ro_after_init; - -static int __init alloc_vectors_page(void) -{ - extern char __kuser_helper_start[], __kuser_helper_end[]; - extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[]; - - int kuser_sz = __kuser_helper_end - __kuser_helper_start; - int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start; - unsigned long vpage; - - vpage = get_zeroed_page(GFP_ATOMIC); - - if (!vpage) - return -ENOMEM; - - /* kuser helpers */ - memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start, - kuser_sz); - - /* sigreturn code */ - memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET, - __aarch32_sigret_code_start, sigret_sz); - - flush_icache_range(vpage, vpage + PAGE_SIZE); - vectors_page[0] = virt_to_page(vpage); - - return 0; -} -arch_initcall(alloc_vectors_page); - -int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp) -{ - struct mm_struct *mm = current->mm; - unsigned long addr = AARCH32_VECTORS_BASE; - static const struct vm_special_mapping spec = { - .name = "[vectors]", - .pages = vectors_page, - - }; - void *ret; - - if (down_write_killable(&mm->mmap_sem)) - return -EINTR; - current->mm->context.vdso = (void *)addr; - - /* Map vectors page at the high address. 
*/ - ret = _install_special_mapping(mm, addr, PAGE_SIZE, - VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC, - &spec); - - up_write(&mm->mmap_sem); - - return PTR_ERR_OR_ZERO(ret); -} -#endif /* CONFIG_COMPAT */ +struct vdso_data *vdso_data = vdso_data_store.data; -static int vdso_mremap(const struct vm_special_mapping *sm, - struct vm_area_struct *new_vma) +static int __vdso_remap(enum arch_vdso_type arch_index, + const struct vm_special_mapping *sm, + struct vm_area_struct *new_vma) { unsigned long new_size = new_vma->vm_end - new_vma->vm_start; - unsigned long vdso_size = vdso_end - vdso_start; + unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end - + vdso_lookup[arch_index].vdso_code_start; if (vdso_size != new_size) return -EINVAL; @@ -124,31 +108,25 @@ static int vdso_mremap(const struct vm_special_mapping *sm, return 0; } -static struct vm_special_mapping vdso_spec[2] __ro_after_init = { - { - .name = "[vvar]", - }, - { - .name = "[vdso]", - .mremap = vdso_mremap, - }, -}; - -static int __init vdso_init(void) +static int __vdso_init(enum arch_vdso_type arch_index) { int i; struct page **vdso_pagelist; unsigned long pfn; - if (memcmp(vdso_start, "\177ELF", 4)) { + if (memcmp(vdso_lookup[arch_index].vdso_code_start, "\177ELF", 4)) { pr_err("vDSO is not a valid ELF object!\n"); return -EINVAL; } - vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; + vdso_lookup[arch_index].vdso_pages = ( + vdso_lookup[arch_index].vdso_code_end - + vdso_lookup[arch_index].vdso_code_start) >> + PAGE_SHIFT; /* Allocate the vDSO pagelist, plus a page for the data. */ - vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *), + vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1, + sizeof(struct page *), GFP_KERNEL); if (vdso_pagelist == NULL) return -ENOMEM; @@ -158,39 +136,38 @@ static int __init vdso_init(void) /* Grab the vDSO code pages. 
*/ - pfn = sym_to_pfn(vdso_start); + pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start); - for (i = 0; i < vdso_pages; i++) + for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++) vdso_pagelist[i + 1] = pfn_to_page(pfn + i); - vdso_spec[0].pages = &vdso_pagelist[0]; - vdso_spec[1].pages = &vdso_pagelist[1]; + vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0]; + vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1]; return 0; } -arch_initcall(vdso_init); -int arch_setup_additional_pages(struct linux_binprm *bprm, - int uses_interp) +static int __setup_additional_pages(enum arch_vdso_type arch_index, + struct mm_struct *mm, + struct linux_binprm *bprm, + int uses_interp) { - struct mm_struct *mm = current->mm; unsigned long vdso_base, vdso_text_len, vdso_mapping_len; void *ret; - vdso_text_len = vdso_pages << PAGE_SHIFT; + vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT; /* Be sure to map the data page */ vdso_mapping_len = vdso_text_len + PAGE_SIZE; - if (down_write_killable(&mm->mmap_sem)) - return -EINTR; vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); if (IS_ERR_VALUE(vdso_base)) { ret = ERR_PTR(vdso_base); goto up_fail; } + ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE, VM_READ|VM_MAYREAD, - &vdso_spec[0]); + vdso_lookup[arch_index].dm); if (IS_ERR(ret)) goto up_fail; @@ -199,59 +176,268 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, ret = _install_special_mapping(mm, vdso_base, vdso_text_len, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, - &vdso_spec[1]); + vdso_lookup[arch_index].cm); if (IS_ERR(ret)) goto up_fail; - - up_write(&mm->mmap_sem); return 0; up_fail: mm->context.vdso = NULL; - up_write(&mm->mmap_sem); return PTR_ERR(ret); } +#ifdef CONFIG_COMPAT /* - * Update the vDSO data page to keep in sync with kernel timekeeping. + * Create and map the vectors page for AArch32 tasks. 
*/ -void update_vsyscall(struct timekeeper *tk) +#ifdef CONFIG_COMPAT_VDSO +static int aarch32_vdso_mremap(const struct vm_special_mapping *sm, + struct vm_area_struct *new_vma) +{ + return __vdso_remap(ARM64_VDSO32, sm, new_vma); +} +#endif /* CONFIG_COMPAT_VDSO */ + +/* + * aarch32_vdso_pages: + * 0 - kuser helpers + * 1 - sigreturn code + * or (CONFIG_COMPAT_VDSO): + * 0 - kuser helpers + * 1 - vdso data + * 2 - vdso code + */ +#define C_VECTORS 0 +#ifdef CONFIG_COMPAT_VDSO +#define C_VVAR 1 +#define C_VDSO 2 +#define C_PAGES (C_VDSO + 1) +#else +#define C_SIGPAGE 1 +#define C_PAGES (C_SIGPAGE + 1) +#endif /* CONFIG_COMPAT_VDSO */ +static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init; +static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = { + { + .name = "[vectors]", /* ABI */ + .pages = &aarch32_vdso_pages[C_VECTORS], + }, +#ifdef CONFIG_COMPAT_VDSO + { + .name = "[vvar]", + }, + { + .name = "[vdso]", + .mremap = aarch32_vdso_mremap, + }, +#else + { + .name = "[sigpage]", /* ABI */ + .pages = &aarch32_vdso_pages[C_SIGPAGE], + }, +#endif /* CONFIG_COMPAT_VDSO */ +}; + +static int aarch32_alloc_kuser_vdso_page(void) { - u32 use_syscall = !tk->tkr_mono.clock->archdata.vdso_direct; - - ++vdso_data->tb_seq_count; - smp_wmb(); - - vdso_data->use_syscall = use_syscall; - vdso_data->xtime_coarse_sec = tk->xtime_sec; - vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >> - tk->tkr_mono.shift; - vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; - vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; - - /* Read without the seqlock held by clock_getres() */ - WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution); - - if (!use_syscall) { - /* tkr_mono.cycle_last == tkr_raw.cycle_last */ - vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; - vdso_data->raw_time_sec = tk->raw_sec; - vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec; - vdso_data->xtime_clock_sec = tk->xtime_sec; - vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; - vdso_data->cs_mono_mult = tk->tkr_mono.mult; - vdso_data->cs_raw_mult = tk->tkr_raw.mult; - /* tkr_mono.shift == tkr_raw.shift */ - vdso_data->cs_shift = tk->tkr_mono.shift; + extern char __kuser_helper_start[], __kuser_helper_end[]; + int kuser_sz = __kuser_helper_end - __kuser_helper_start; + unsigned long vdso_page; + + if (!IS_ENABLED(CONFIG_KUSER_HELPERS)) + return 0; + + vdso_page = get_zeroed_page(GFP_ATOMIC); + if (!vdso_page) + return -ENOMEM; + + memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start, + kuser_sz); + aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page); + flush_dcache_page(aarch32_vdso_pages[C_VECTORS]); + return 0; +} + +#ifdef CONFIG_COMPAT_VDSO +static int __aarch32_alloc_vdso_pages(void) +{ + int ret; + + vdso_lookup[ARM64_VDSO32].dm = &aarch32_vdso_spec[C_VVAR]; + vdso_lookup[ARM64_VDSO32].cm = &aarch32_vdso_spec[C_VDSO]; + + ret = __vdso_init(ARM64_VDSO32); + if (ret) + return ret; + + ret = aarch32_alloc_kuser_vdso_page(); + if (ret) { + unsigned long c_vvar = + (unsigned long)page_to_virt(aarch32_vdso_pages[C_VVAR]); + unsigned long c_vdso = + (unsigned long)page_to_virt(aarch32_vdso_pages[C_VDSO]); + + free_page(c_vvar); + free_page(c_vdso); + } + + return ret; +} +#else +static int __aarch32_alloc_vdso_pages(void) +{ + extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[]; + int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start; + unsigned long sigpage; + int ret; + + sigpage = get_zeroed_page(GFP_ATOMIC); + if (!sigpage) + return -ENOMEM; + + 
memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz); + aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage); + flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]); + + ret = aarch32_alloc_kuser_vdso_page(); + if (ret) + free_page(sigpage); + + return ret; +} +#endif /* CONFIG_COMPAT_VDSO */ + +static int __init aarch32_alloc_vdso_pages(void) +{ + return __aarch32_alloc_vdso_pages(); +} +arch_initcall(aarch32_alloc_vdso_pages); + +static int aarch32_kuser_helpers_setup(struct mm_struct *mm) +{ + void *ret; + + if (!IS_ENABLED(CONFIG_KUSER_HELPERS)) + return 0; + + /* + * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's + * not safe to CoW the page containing the CPU exception vectors. + */ + ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE, + VM_READ | VM_EXEC | + VM_MAYREAD | VM_MAYEXEC, + &aarch32_vdso_spec[C_VECTORS]); + + return PTR_ERR_OR_ZERO(ret); +} + +#ifndef CONFIG_COMPAT_VDSO +static int aarch32_sigreturn_setup(struct mm_struct *mm) +{ + unsigned long addr; + void *ret; + + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); + if (IS_ERR_VALUE(addr)) { + ret = ERR_PTR(addr); + goto out; } - smp_wmb(); - ++vdso_data->tb_seq_count; + /* + * VM_MAYWRITE is required to allow gdb to Copy-on-Write and + * set breakpoints. + */ + ret = _install_special_mapping(mm, addr, PAGE_SIZE, + VM_READ | VM_EXEC | VM_MAYREAD | + VM_MAYWRITE | VM_MAYEXEC, + &aarch32_vdso_spec[C_SIGPAGE]); + if (IS_ERR(ret)) + goto out; + + mm->context.vdso = (void *)addr; + +out: + return PTR_ERR_OR_ZERO(ret); } +#endif /* !CONFIG_COMPAT_VDSO */ -void update_vsyscall_tz(void) +int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { - vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; - vdso_data->tz_dsttime = sys_tz.tz_dsttime; + struct mm_struct *mm = current->mm; + int ret; + + if (down_write_killable(&mm->mmap_sem)) + return -EINTR; + + ret = aarch32_kuser_helpers_setup(mm); + if (ret) + goto out; + +#ifdef CONFIG_COMPAT_VDSO + ret = __setup_additional_pages(ARM64_VDSO32, + mm, + bprm, + uses_interp); +#else + ret = aarch32_sigreturn_setup(mm); +#endif /* CONFIG_COMPAT_VDSO */ + +out: + up_write(&mm->mmap_sem); + return ret; +} +#endif /* CONFIG_COMPAT */ + +static int vdso_mremap(const struct vm_special_mapping *sm, + struct vm_area_struct *new_vma) +{ + return __vdso_remap(ARM64_VDSO, sm, new_vma); +} + +/* + * aarch64_vdso_pages: + * 0 - vvar + * 1 - vdso + */ +#define A_VVAR 0 +#define A_VDSO 1 +#define A_PAGES (A_VDSO + 1) +static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = { + { + .name = "[vvar]", + }, + { + .name = "[vdso]", + .mremap = vdso_mremap, + }, +}; + +static int __init vdso_init(void) +{ + vdso_lookup[ARM64_VDSO].dm = &vdso_spec[A_VVAR]; + vdso_lookup[ARM64_VDSO].cm = &vdso_spec[A_VDSO]; + + return __vdso_init(ARM64_VDSO); +} +arch_initcall(vdso_init); + +int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp) +{ + struct mm_struct *mm = current->mm; + int ret; + + if (down_write_killable(&mm->mmap_sem)) + return -EINTR; + + ret = __setup_additional_pages(ARM64_VDSO, + mm, + bprm, + uses_interp); + + up_write(&mm->mmap_sem); + + return ret; } diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index f7620d315bc5a7d208997d8b3d14022d19c88309..d63cbd2ff4d7dcb27c9489fd9bfe8138935bdff7 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -6,26 +6,51 @@ # Heavily based on the vDSO Makefiles for other archs. 
# -obj-vdso := gettimeofday.o note.o sigreturn.o +# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before +# the inclusion of generic Makefile. +ARCH_REL_TYPE_ABS := R_AARCH64_JUMP_SLOT|R_AARCH64_GLOB_DAT|R_AARCH64_ABS64 +include $(srctree)/lib/vdso/Makefile + +obj-vdso := vgettimeofday.o note.o sigreturn.o # Build rules targets := $(obj-vdso) vdso.so vdso.so.dbg obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) -ccflags-y := -shared -fno-common -fno-builtin -ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \ - $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) +ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \ + --build-id -n -T ccflags-y += $(DISABLE_LTO) CFLAGS_REMOVE_vgettimeofday.o += $(CC_FLAGS_SCS) +ccflags-y := -fno-common -fno-builtin -fno-stack-protector +ccflags-y += -DDISABLE_BRANCH_PROFILING + +VDSO_LDFLAGS := -Bsymbolic + +CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os +KBUILD_CFLAGS += $(DISABLE_LTO) +KASAN_SANITIZE := n +UBSAN_SANITIZE := n +OBJECT_FILES_NON_STANDARD := y +KCOV_INSTRUMENT := n + +CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny + +ifneq ($(c-gettimeofday-y),) + CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y) +endif + +# Clang versions less than 8 do not support -mcmodel=tiny +ifeq ($(CONFIG_CC_IS_CLANG), y) + ifeq ($(shell test $(CONFIG_CLANG_VERSION) -lt 80000; echo $$?),0) + CFLAGS_REMOVE_vgettimeofday.o += -mcmodel=tiny + endif +endif + # Disable gcov profiling for VDSO code GCOV_PROFILE := n -# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared -# down to collect2, resulting in silent corruption of the vDSO image. -ccflags-y += -Wl,-shared - obj-y += vdso.o extra-y += vdso.lds CPPFLAGS_vdso.lds += -P -C -U$(ARCH) @@ -35,7 +60,7 @@ $(obj)/vdso.o : $(obj)/vdso.so # Link rule for the .so file, .lds has to be first $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) - $(call if_changed,vdsold) + $(call if_changed,vdsold_and_vdso_check) # Strip rule for the .so file $(obj)/%.so: OBJCOPYFLAGS := -S @@ -52,16 +77,13 @@ endef include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE $(call if_changed,vdsosym) -# Assembly rules for the .S files -$(obj-vdso): %.o: %.S FORCE - $(call if_changed_dep,vdsoas) - # Actual build commands -quiet_cmd_vdsold = VDSOL $@ - cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@ quiet_cmd_vdsoas = VDSOA $@ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< +quiet_cmd_vdsold_and_vdso_check = LD $@ + cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check) + # Install commands for the unstripped file quiet_cmd_vdso_install = INSTALL $@ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S deleted file mode 100644 index 856fee6d3512977d502c86f265bd406245f3b039..0000000000000000000000000000000000000000 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Userspace implementations of gettimeofday() and friends. - * - * Copyright (C) 2012 ARM Limited - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * Author: Will Deacon - */ - -#include -#include -#include - -#define NSEC_PER_SEC_LO16 0xca00 -#define NSEC_PER_SEC_HI16 0x3b9a - -vdso_data .req x6 -seqcnt .req w7 -w_tmp .req w8 -x_tmp .req x8 - -/* - * Conventions for macro arguments: - * - An argument is write-only if its name starts with "res". - * - All other arguments are read-only, unless otherwise specified. - */ - - .macro seqcnt_acquire -9999: ldr seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT] - tbnz seqcnt, #0, 9999b - dmb ishld - .endm - - .macro seqcnt_check fail - dmb ishld - ldr w_tmp, [vdso_data, #VDSO_TB_SEQ_COUNT] - cmp w_tmp, seqcnt - b.ne \fail - .endm - - .macro syscall_check fail - ldr w_tmp, [vdso_data, #VDSO_USE_SYSCALL] - cbnz w_tmp, \fail - .endm - - .macro get_nsec_per_sec res - mov \res, #NSEC_PER_SEC_LO16 - movk \res, #NSEC_PER_SEC_HI16, lsl #16 - .endm - - /* - * Returns the clock delta, in nanoseconds left-shifted by the clock - * shift. - */ - .macro get_clock_shifted_nsec res, cycle_last, mult - /* Read the virtual counter. */ - isb - mrs x_tmp, cntvct_el0 - /* Calculate cycle delta and convert to ns. */ - sub \res, x_tmp, \cycle_last - /* We can only guarantee 56 bits of precision. */ - movn x_tmp, #0xff00, lsl #48 - and \res, x_tmp, \res - mul \res, \res, \mult - /* - * Fake address dependency from the value computed from the counter - * register to subsequent data page accesses so that the sequence - * locking also orders the read of the counter. - */ - and x_tmp, \res, xzr - add vdso_data, vdso_data, x_tmp - .endm - - /* - * Returns in res_{sec,nsec} the REALTIME timespec, based on the - * "wall time" (xtime) and the clock_mono delta. - */ - .macro get_ts_realtime res_sec, res_nsec, \ - clock_nsec, xtime_sec, xtime_nsec, nsec_to_sec - add \res_nsec, \clock_nsec, \xtime_nsec - udiv x_tmp, \res_nsec, \nsec_to_sec - add \res_sec, \xtime_sec, x_tmp - msub \res_nsec, x_tmp, \nsec_to_sec, \res_nsec - .endm - - /* - * Returns in res_{sec,nsec} the timespec based on the clock_raw delta, - * used for CLOCK_MONOTONIC_RAW. - */ - .macro get_ts_clock_raw res_sec, res_nsec, clock_nsec, nsec_to_sec - udiv \res_sec, \clock_nsec, \nsec_to_sec - msub \res_nsec, \res_sec, \nsec_to_sec, \clock_nsec - .endm - - /* sec and nsec are modified in place. */ - .macro add_ts sec, nsec, ts_sec, ts_nsec, nsec_to_sec - /* Add timespec. */ - add \sec, \sec, \ts_sec - add \nsec, \nsec, \ts_nsec - - /* Normalise the new timespec. */ - cmp \nsec, \nsec_to_sec - b.lt 9999f - sub \nsec, \nsec, \nsec_to_sec - add \sec, \sec, #1 -9999: - cmp \nsec, #0 - b.ge 9998f - add \nsec, \nsec, \nsec_to_sec - sub \sec, \sec, #1 -9998: - .endm - - .macro clock_gettime_return, shift=0 - .if \shift == 1 - lsr x11, x11, x12 - .endif - stp x10, x11, [x1, #TSPEC_TV_SEC] - mov x0, xzr - ret - .endm - - .macro jump_slot jumptable, index, label - .if (. - \jumptable) != 4 * (\index) - .error "Jump slot index mismatch" - .endif - b \label - .endm - - .text - -/* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */ -ENTRY(__kernel_gettimeofday) - .cfi_startproc - adr vdso_data, _vdso_data - /* If tv is NULL, skip to the timezone code. */ - cbz x0, 2f - - /* Compute the time of day. 
*/ -1: seqcnt_acquire - syscall_check fail=4f - ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] - /* w11 = cs_mono_mult, w12 = cs_shift */ - ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] - ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] - - get_nsec_per_sec res=x9 - lsl x9, x9, x12 - - get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 - seqcnt_check fail=1b - get_ts_realtime res_sec=x10, res_nsec=x11, \ - clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 - - /* Convert ns to us. */ - mov x13, #1000 - lsl x13, x13, x12 - udiv x11, x11, x13 - stp x10, x11, [x0, #TVAL_TV_SEC] -2: - /* If tz is NULL, return 0. */ - cbz x1, 3f - ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST] - stp w4, w5, [x1, #TZ_MINWEST] -3: - mov x0, xzr - ret -4: - /* Syscall fallback. */ - mov x8, #__NR_gettimeofday - svc #0 - ret - .cfi_endproc -ENDPROC(__kernel_gettimeofday) - -#define JUMPSLOT_MAX CLOCK_MONOTONIC_COARSE - -/* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */ -ENTRY(__kernel_clock_gettime) - .cfi_startproc - cmp w0, #JUMPSLOT_MAX - b.hi syscall - adr vdso_data, _vdso_data - adr x_tmp, jumptable - add x_tmp, x_tmp, w0, uxtw #2 - br x_tmp - - ALIGN -jumptable: - jump_slot jumptable, CLOCK_REALTIME, realtime - jump_slot jumptable, CLOCK_MONOTONIC, monotonic - b syscall - b syscall - jump_slot jumptable, CLOCK_MONOTONIC_RAW, monotonic_raw - jump_slot jumptable, CLOCK_REALTIME_COARSE, realtime_coarse - jump_slot jumptable, CLOCK_MONOTONIC_COARSE, monotonic_coarse - - .if (. - jumptable) != 4 * (JUMPSLOT_MAX + 1) - .error "Wrong jumptable size" - .endif - - ALIGN -realtime: - seqcnt_acquire - syscall_check fail=syscall - ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] - /* w11 = cs_mono_mult, w12 = cs_shift */ - ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] - ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] - - /* All computations are done with left-shifted nsecs. */ - get_nsec_per_sec res=x9 - lsl x9, x9, x12 - - get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 - seqcnt_check fail=realtime - get_ts_realtime res_sec=x10, res_nsec=x11, \ - clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 - clock_gettime_return, shift=1 - - ALIGN -monotonic: - seqcnt_acquire - syscall_check fail=syscall - ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] - /* w11 = cs_mono_mult, w12 = cs_shift */ - ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT] - ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] - ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC] - - /* All computations are done with left-shifted nsecs. */ - lsl x4, x4, x12 - get_nsec_per_sec res=x9 - lsl x9, x9, x12 - - get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 - seqcnt_check fail=monotonic - get_ts_realtime res_sec=x10, res_nsec=x11, \ - clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9 - - add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9 - clock_gettime_return, shift=1 - - ALIGN -monotonic_raw: - seqcnt_acquire - syscall_check fail=syscall - ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] - /* w11 = cs_raw_mult, w12 = cs_shift */ - ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT] - ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC] - - /* All computations are done with left-shifted nsecs. 
*/ - get_nsec_per_sec res=x9 - lsl x9, x9, x12 - - get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11 - seqcnt_check fail=monotonic_raw - get_ts_clock_raw res_sec=x10, res_nsec=x11, \ - clock_nsec=x15, nsec_to_sec=x9 - - add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9 - clock_gettime_return, shift=1 - - ALIGN -realtime_coarse: - seqcnt_acquire - ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC] - seqcnt_check fail=realtime_coarse - clock_gettime_return - - ALIGN -monotonic_coarse: - seqcnt_acquire - ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC] - ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC] - seqcnt_check fail=monotonic_coarse - - /* Computations are done in (non-shifted) nsecs. */ - get_nsec_per_sec res=x9 - add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9 - clock_gettime_return - - ALIGN -syscall: /* Syscall fallback. */ - mov x8, #__NR_clock_gettime - svc #0 - ret - .cfi_endproc -ENDPROC(__kernel_clock_gettime) - -/* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */ -ENTRY(__kernel_clock_getres) - .cfi_startproc - cmp w0, #CLOCK_REALTIME - ccmp w0, #CLOCK_MONOTONIC, #0x4, ne - ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne - b.ne 1f - - adr vdso_data, _vdso_data - ldr w2, [vdso_data, #CLOCK_REALTIME_RES] - b 2f -1: - cmp w0, #CLOCK_REALTIME_COARSE - ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne - b.ne 4f - ldr x2, 5f -2: - cbz x1, 3f - stp xzr, x2, [x1] - -3: /* res == NULL. */ - mov w0, wzr - ret - -4: /* Syscall fallback. */ - mov x8, #__NR_clock_getres - svc #0 - ret -5: - .quad CLOCK_COARSE_RES - .cfi_endproc -ENDPROC(__kernel_clock_getres) diff --git a/arch/arm64/kernel/vdso/vgettimeofday.c b/arch/arm64/kernel/vdso/vgettimeofday.c new file mode 100644 index 0000000000000000000000000000000000000000..4236cf34d7d9c340d0183a180852554410e1e755 --- /dev/null +++ b/arch/arm64/kernel/vdso/vgettimeofday.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM64 userspace implementations of gettimeofday() and similar. + * + * Copyright (C) 2018 ARM Limited + * + */ + +int __kernel_clock_gettime(clockid_t clock, + struct __kernel_timespec *ts) +{ + return __cvdso_clock_gettime(clock, ts); +} + +int __kernel_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return __cvdso_gettimeofday(tv, tz); +} + +int __kernel_clock_getres(clockid_t clock_id, + struct __kernel_timespec *res) +{ + return __cvdso_clock_getres(clock_id, res); +} diff --git a/arch/arm64/kernel/vdso32/.gitignore b/arch/arm64/kernel/vdso32/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..4fea950fa5ed0fc018e39d102297cefe780a5119 --- /dev/null +++ b/arch/arm64/kernel/vdso32/.gitignore @@ -0,0 +1,2 @@ +vdso.lds +vdso.so.raw diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..9c5c5d6ccb07a779b9664ca3f4993103da599fa4 --- /dev/null +++ b/arch/arm64/kernel/vdso32/Makefile @@ -0,0 +1,211 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for vdso32 +# + +# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before +# the inclusion of generic Makefile. +ARCH_REL_TYPE_ABS := R_ARM_JUMP_SLOT|R_ARM_GLOB_DAT|R_ARM_ABS32 +include $(srctree)/lib/vdso/Makefile + +# Same as cc-*option, but using CC_COMPAT instead of CC +ifeq ($(CONFIG_CC_IS_CLANG), y) +COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit)) +COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..) 
+ +CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) +CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR) +CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments +ifneq ($(COMPAT_GCC_TOOLCHAIN),) +CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN) +endif + +CC_COMPAT ?= $(CC) +CC_COMPAT += $(CC_COMPAT_CLANG_FLAGS) +else +CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc +endif + +cc32-option = $(call try-run,\ + $(CC_COMPAT) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) +cc32-disable-warning = $(call try-run,\ + $(CC_COMPAT) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) +cc32-ldoption = $(call try-run,\ + $(CC_COMPAT) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2)) +cc32-as-instr = $(call try-run,\ + printf "%b\n" "$(1)" | $(CC_COMPAT) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3)) + +# We cannot use the global flags to compile the vDSO files, the main reason +# being that the 32-bit compiler may be older than the main (64-bit) compiler +# and therefore may not understand flags set using $(cc-option ...). Besides, +# arch-specific options should be taken from the arm Makefile instead of the +# arm64 one. +# As a result we set our own flags here. + +# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile +VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include) +VDSO_CPPFLAGS += $(LINUXINCLUDE) + +# Common C and assembly flags +# From top-level Makefile +VDSO_CAFLAGS := $(VDSO_CPPFLAGS) +ifneq ($(shell $(CC_COMPAT) --version 2>&1 | head -n 1 | grep clang),) +VDSO_CAFLAGS += --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) +endif + +VDSO_CAFLAGS += $(call cc32-option,-fno-PIE) +ifdef CONFIG_DEBUG_INFO +VDSO_CAFLAGS += -g +endif + +# From arm Makefile +VDSO_CAFLAGS += $(call cc32-option,-fno-dwarf2-cfi-asm) +VDSO_CAFLAGS += -mabi=aapcs-linux -mfloat-abi=soft +ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) +VDSO_CAFLAGS += -mbig-endian +else +VDSO_CAFLAGS += -mlittle-endian +endif + +# From arm vDSO Makefile +VDSO_CAFLAGS += -fPIC -fno-builtin -fno-stack-protector +VDSO_CAFLAGS += -DDISABLE_BRANCH_PROFILING + + +# Try to compile for ARMv8. If the compiler is too old and doesn't support it, +# fall back to v7. There is no easy way to check for what architecture the code +# is being compiled, so define a macro specifying that (see arch/arm/Makefile). +VDSO_CAFLAGS += $(call cc32-option,-march=armv8-a -D__LINUX_ARM_ARCH__=8,\ + -march=armv7-a -D__LINUX_ARM_ARCH__=7) + +VDSO_CFLAGS := $(VDSO_CAFLAGS) +VDSO_CFLAGS += -DENABLE_COMPAT_VDSO=1 +# KBUILD_CFLAGS from top-level Makefile +VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ + -fno-strict-aliasing -fno-common \ + -Werror-implicit-function-declaration \ + -Wno-format-security \ + -std=gnu89 +VDSO_CFLAGS += -O2 +# Some useful compiler-dependent flags from top-level Makefile +VDSO_CFLAGS += $(call cc32-option,-Wdeclaration-after-statement,) +VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign) +VDSO_CFLAGS += $(call cc32-option,-fno-strict-overflow) +VDSO_CFLAGS += $(call cc32-option,-Werror=strict-prototypes) +VDSO_CFLAGS += $(call cc32-option,-Werror=date-time) +VDSO_CFLAGS += $(call cc32-option,-Werror=incompatible-pointer-types) + +# The 32-bit compiler does not provide 128-bit integers, which are used in +# some headers that are indirectly included from the vDSO code. +# This hack makes the compiler happy and should trigger a warning/error if +# variables of such type are referenced. 
+VDSO_CFLAGS += -D__uint128_t='void*' +# Silence some warnings coming from headers that operate on long's +# (on GCC 4.8 or older, there is unfortunately no way to silence this warning) +VDSO_CFLAGS += $(call cc32-disable-warning,shift-count-overflow) +VDSO_CFLAGS += -Wno-int-to-pointer-cast + +VDSO_AFLAGS := $(VDSO_CAFLAGS) +VDSO_AFLAGS += -D__ASSEMBLY__ + +# Check for binutils support for dmb ishld +dmbinstr := $(call cc32-as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1) + +VDSO_CFLAGS += $(dmbinstr) +VDSO_AFLAGS += $(dmbinstr) + +VDSO_LDFLAGS := $(VDSO_CPPFLAGS) +# From arm vDSO Makefile +VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 +VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 +VDSO_LDFLAGS += -nostdlib -shared -mfloat-abi=soft +VDSO_LDFLAGS += $(call cc32-ldoption,-Wl$(comma)--hash-style=sysv) +VDSO_LDFLAGS += $(call cc32-ldoption,-Wl$(comma)--build-id) +VDSO_LDFLAGS += $(call cc32-ldoption,-fuse-ld=bfd) + + +# Borrow vdsomunge.c from the arm vDSO +# We have to use a relative path because scripts/Makefile.host prefixes +# $(hostprogs-y) with $(obj) +munge := ../../../arm/vdso/vdsomunge +hostprogs-y := $(munge) + +c-obj-vdso := note.o +c-obj-vdso-gettimeofday := vgettimeofday.o +asm-obj-vdso := sigreturn.o + +ifneq ($(c-gettimeofday-y),) +VDSO_CFLAGS_gettimeofday_o += -include $(c-gettimeofday-y) +endif + +VDSO_CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os + +# Build rules +targets := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso) vdso.so vdso.so.dbg vdso.so.raw +c-obj-vdso := $(addprefix $(obj)/, $(c-obj-vdso)) +c-obj-vdso-gettimeofday := $(addprefix $(obj)/, $(c-obj-vdso-gettimeofday)) +asm-obj-vdso := $(addprefix $(obj)/, $(asm-obj-vdso)) +obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso) + +obj-y += vdso.o +extra-y += vdso.lds +CPPFLAGS_vdso.lds += -P -C -U$(ARCH) + +# Force dependency (vdso.s includes vdso.so through incbin) +$(obj)/vdso.o: $(obj)/vdso.so + +include/generated/vdso32-offsets.h: $(obj)/vdso.so.dbg FORCE + $(call if_changed,vdsosym) + +# Strip rule for vdso.so +$(obj)/vdso.so: OBJCOPYFLAGS := -S +$(obj)/vdso.so: $(obj)/vdso.so.dbg FORCE + $(call if_changed,objcopy) + +$(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE + $(call if_changed,vdsomunge) + +# Link rule for the .so file, .lds has to be first +$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE + $(call if_changed,vdsold_and_vdso_check) + +# Compilation rules for the vDSO sources +$(c-obj-vdso): %.o: %.c FORCE + $(call if_changed_dep,vdsocc) +$(c-obj-vdso-gettimeofday): %.o: %.c FORCE + $(call if_changed_dep,vdsocc_gettimeofday) +$(asm-obj-vdso): %.o: %.S FORCE + $(call if_changed_dep,vdsoas) + +# Actual build commands +quiet_cmd_vdsold_and_vdso_check = LD32 $@ + cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check) + +quiet_cmd_vdsold = LD32 $@ + cmd_vdsold = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \ + -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@ +quiet_cmd_vdsocc = CC32 $@ + cmd_vdsocc = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $< +quiet_cmd_vdsocc_gettimeofday = CC32 $@ + cmd_vdsocc_gettimeofday = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $< +quiet_cmd_vdsoas = AS32 $@ + cmd_vdsoas = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $< + +quiet_cmd_vdsomunge = MUNGE $@ + cmd_vdsomunge = $(obj)/$(munge) $< $@ + +# Generate vDSO offsets using helper script (borrowed from the 64-bit vDSO) +gen-vdsosym := 
$(srctree)/$(src)/../vdso/gen_vdso_offsets.sh +quiet_cmd_vdsosym = VDSOSYM $@ +# The AArch64 nm should be able to read an AArch32 binary + cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ + +# Install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so + +vdso.so: $(obj)/vdso.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + +vdso_install: vdso.so diff --git a/arch/arm64/kernel/vdso32/note.c b/arch/arm64/kernel/vdso32/note.c new file mode 100644 index 0000000000000000000000000000000000000000..eff5bf9efb8b7b4e6d8b5f3497186912de0d611b --- /dev/null +++ b/arch/arm64/kernel/vdso32/note.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2012-2018 ARM Limited + * + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. + * Here we can supply some information useful to userland. + */ + +#include +#include +#include +#include + +ELFNOTE32("Linux", 0, LINUX_VERSION_CODE); +BUILD_SALT; diff --git a/arch/arm64/kernel/vdso32/sigreturn.S b/arch/arm64/kernel/vdso32/sigreturn.S new file mode 100644 index 0000000000000000000000000000000000000000..1a81277c2d09a7798397d40185693e7df13e91fd --- /dev/null +++ b/arch/arm64/kernel/vdso32/sigreturn.S @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file provides both A32 and T32 versions, in accordance with the + * arm sigreturn code. + * + * Copyright (C) 2018 ARM Limited + */ + +#include +#include +#include + +#define ARM_ENTRY(name) \ + ENTRY(name) + +#define ARM_ENDPROC(name) \ + .type name, %function; \ + END(name) + + .text + + .arm + .fnstart + .save {r0-r15} + .pad #COMPAT_SIGFRAME_REGS_OFFSET + nop +ARM_ENTRY(__kernel_sigreturn_arm) + mov r7, #__NR_compat_sigreturn + svc #0 + .fnend +ARM_ENDPROC(__kernel_sigreturn_arm) + + .fnstart + .save {r0-r15} + .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET + nop +ARM_ENTRY(__kernel_rt_sigreturn_arm) + mov r7, #__NR_compat_rt_sigreturn + svc #0 + .fnend +ARM_ENDPROC(__kernel_rt_sigreturn_arm) + + .thumb + .fnstart + .save {r0-r15} + .pad #COMPAT_SIGFRAME_REGS_OFFSET + nop +ARM_ENTRY(__kernel_sigreturn_thumb) + mov r7, #__NR_compat_sigreturn + svc #0 + .fnend +ARM_ENDPROC(__kernel_sigreturn_thumb) + + .fnstart + .save {r0-r15} + .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET + nop +ARM_ENTRY(__kernel_rt_sigreturn_thumb) + mov r7, #__NR_compat_rt_sigreturn + svc #0 + .fnend +ARM_ENDPROC(__kernel_rt_sigreturn_thumb) diff --git a/arch/arm64/kernel/vdso32/vdso.S b/arch/arm64/kernel/vdso32/vdso.S new file mode 100644 index 0000000000000000000000000000000000000000..e72ac7bc4c04f483f38e588c3098c53c96531a8d --- /dev/null +++ b/arch/arm64/kernel/vdso32/vdso.S @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2012 ARM Limited + */ + +#include +#include +#include +#include + + .globl vdso32_start, vdso32_end + .section .rodata + .balign PAGE_SIZE +vdso32_start: + .incbin "arch/arm64/kernel/vdso32/vdso.so" + .balign PAGE_SIZE +vdso32_end: + + .previous diff --git a/arch/arm64/kernel/vdso32/vdso.lds.S b/arch/arm64/kernel/vdso32/vdso.lds.S new file mode 100644 index 0000000000000000000000000000000000000000..a3944927eaeb49cc29f07e327fdf5f0e40de1cfe --- /dev/null +++ b/arch/arm64/kernel/vdso32/vdso.lds.S @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Adapted from arm64 version. + * + * GNU linker script for the VDSO library. + * Heavily based on the vDSO linker scripts for other archs. 
+ * + * Copyright (C) 2012-2018 ARM Limited + */ + +#include +#include +#include + +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", "elf32-littlearm") +OUTPUT_ARCH(arm) + +SECTIONS +{ + PROVIDE_HIDDEN(_vdso_data = . - PAGE_SIZE); + . = VDSO_LBASE + SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + + .dynamic : { *(.dynamic) } :text :dynamic + + .rodata : { *(.rodata*) } :text + + .text : { *(.text*) } :text =0xe7f001f2 + + .got : { *(.got) } + .rel.plt : { *(.rel.plt) } + + /DISCARD/ : { + *(.note.GNU-stack) + *(.data .data.* .gnu.linkonce.d.* .sdata*) + *(.bss .sbss .dynbss .dynsbss) + } +} + +/* + * We must supply the ELF program headers explicitly to get just one + * PT_LOAD segment, and set the flags explicitly to make segments read-only. + */ +PHDRS +{ + text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ +} + +VERSION +{ + LINUX_2.6 { + global: + __vdso_clock_gettime; + __vdso_gettimeofday; + __vdso_clock_getres; + __kernel_sigreturn_arm; + __kernel_sigreturn_thumb; + __kernel_rt_sigreturn_arm; + __kernel_rt_sigreturn_thumb; + __vdso_clock_gettime64; + local: *; + }; +} + +/* + * Make the sigreturn code visible to the kernel. + */ +VDSO_compat_sigreturn_arm = __kernel_sigreturn_arm; +VDSO_compat_sigreturn_thumb = __kernel_sigreturn_thumb; +VDSO_compat_rt_sigreturn_arm = __kernel_rt_sigreturn_arm; +VDSO_compat_rt_sigreturn_thumb = __kernel_rt_sigreturn_thumb; diff --git a/arch/arm64/kernel/vdso32/vgettimeofday.c b/arch/arm64/kernel/vdso32/vgettimeofday.c new file mode 100644 index 0000000000000000000000000000000000000000..5acff29c599154f25473886f5bee2de0b0937aa2 --- /dev/null +++ b/arch/arm64/kernel/vdso32/vgettimeofday.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM64 compat userspace implementations of gettimeofday() and similar. 
+ * + * Copyright (C) 2018 ARM Limited + * + */ + +int __vdso_clock_gettime(clockid_t clock, + struct old_timespec32 *ts) +{ + return __cvdso_clock_gettime32(clock, ts); +} + +int __vdso_clock_gettime64(clockid_t clock, + struct __kernel_timespec *ts) +{ + return __cvdso_clock_gettime(clock, ts); +} + +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return __cvdso_gettimeofday(tv, tz); +} + +int __vdso_clock_getres(clockid_t clock_id, + struct old_timespec32 *res) +{ + return __cvdso_clock_getres_time32(clock_id, res); +} + +/* Avoid unresolved references emitted by GCC */ + +void __aeabi_unwind_cpp_pr0(void) +{ +} + +void __aeabi_unwind_cpp_pr1(void) +{ +} + +void __aeabi_unwind_cpp_pr2(void) +{ +} diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index a6c9fbaeaefcdd71d0ea70c8eeb89c55692f8b66..870e594f95edda61e785de85b92148080febd03c 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -179,6 +179,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) } memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id)); + + if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) { + int i; + + for (i = 0; i < 16; i++) + *vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i); + } out: return err; } diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 8719253168f29f4aa11bd9f81e5393bcb362c0b4..000fb44654147b32b3109be398e12a0a51b2854a 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -230,7 +230,7 @@ ENTRY(__inval_dcache_area) * - start - virtual start address of region * - size - size in question */ -__dma_inv_area: +ENTRY(__dma_inv_area) add x1, x1, x0 dcache_line_size x2, x3 sub x3, x2, #1 @@ -269,7 +269,7 @@ ENTRY(__clean_dcache_area_poc) * - start - virtual start address of region * - size - size in question */ -__dma_clean_area: +ENTRY(__dma_clean_area) dcache_by_line_op cvac, sy, x0, x1, x2, x3 ret ENDPIPROC(__clean_dcache_area_poc) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index d4043839309c240ffd1f337ce137642375879559..b182b6766f43f754e9a257406363ada2c4953802 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -26,26 +26,47 @@ #include #include #include +#include +#include #include #include #include #include +#include #include #include +#include +#include +#include static int swiotlb __ro_after_init; static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot, bool coherent) { - if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE)) + if (attrs & DMA_ATTR_STRONGLY_ORDERED) + return pgprot_noncached(prot); + else if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE)) return pgprot_writecombine(prot); return prot; } +static bool is_dma_coherent(struct device *dev, unsigned long attrs) +{ + + if (attrs & DMA_ATTR_FORCE_COHERENT) + return true; + else if (attrs & DMA_ATTR_FORCE_NON_COHERENT) + return false; + else if (is_device_dma_coherent(dev)) + return true; + else + return false; +} static struct gen_pool *atomic_pool __ro_after_init; +#define NO_KERNEL_MAPPING_DUMMY 0x2222 #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE; @@ -93,13 +114,50 @@ static int __free_from_pool(void *start, size_t size) return 1; } +static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr, + void *data) +{ + struct page *page = virt_to_page(addr); + pgprot_t prot = *(pgprot_t *)data; + + set_pte(pte, mk_pte(page, prot)); + return 0; +} + +static int __dma_clear_pte(pte_t *pte, 
pgtable_t token, unsigned long addr, + void *data) +{ + pte_clear(&init_mm, addr, pte); + return 0; +} + +static void __dma_remap(struct page *page, size_t size, pgprot_t prot, + bool no_kernel_map) +{ + unsigned long start = (unsigned long) page_address(page); + unsigned long end = start + size; + int (*func)(pte_t *pte, pgtable_t token, unsigned long addr, + void *data); + + if (no_kernel_map) + func = __dma_clear_pte; + else + func = __dma_update_pte; + + apply_to_page_range(&init_mm, start, size, func, &prot); + /* ensure prot is applied before returning */ + mb(); + flush_tlb_kernel_range(start, end); +} + + static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) { struct page *page; void *ptr, *coherent_ptr; - bool coherent = is_device_dma_coherent(dev); + bool coherent = is_dma_coherent(dev, attrs); pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false); size = PAGE_ALIGN(size); @@ -115,6 +173,7 @@ static void *__dma_alloc(struct device *dev, size_t size, } ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs); + if (!ptr) goto no_mem; @@ -122,19 +181,31 @@ static void *__dma_alloc(struct device *dev, size_t size, if (coherent) return ptr; - /* remove any dirty cache lines on the kernel alias */ __dma_flush_area(ptr, size); - /* create a coherent mapping */ - page = virt_to_page(ptr); - coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP, - prot, __builtin_return_address(0)); - if (!coherent_ptr) - goto no_map; - + if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { + coherent_ptr = (void *)NO_KERNEL_MAPPING_DUMMY; + __dma_remap(virt_to_page(ptr), size, __pgprot(0), true); + } else { + if ((attrs & DMA_ATTR_STRONGLY_ORDERED)) + __dma_remap(virt_to_page(ptr), size, __pgprot(0), true); + + /* create a coherent mapping */ + page = virt_to_page(ptr); + coherent_ptr = dma_common_contiguous_remap( + page, size, VM_USERMAP, prot, + __builtin_return_address(0)); + if (!coherent_ptr) + goto no_map; + } return coherent_ptr; no_map: + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) || + (attrs & DMA_ATTR_STRONGLY_ORDERED)) + __dma_remap(phys_to_page(dma_to_phys(dev, *dma_handle)), + size, PAGE_KERNEL, false); + swiotlb_free(dev, size, ptr, *dma_handle, attrs); no_mem: return NULL; @@ -148,11 +219,17 @@ static void __dma_free(struct device *dev, size_t size, size = PAGE_ALIGN(size); - if (!is_device_dma_coherent(dev)) { + if (!is_dma_coherent(dev, attrs)) { if (__free_from_pool(vaddr, size)) return; - vunmap(vaddr); + if (!(attrs & DMA_ATTR_NO_KERNEL_MAPPING)) + vunmap(vaddr); } + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) || + (attrs & DMA_ATTR_STRONGLY_ORDERED)) + __dma_remap(phys_to_page(dma_to_phys(dev, dma_handle)), + size, PAGE_KERNEL, false); + swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs); } @@ -164,7 +241,7 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, dma_addr_t dev_addr; dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs); - if (!is_device_dma_coherent(dev) && + if (!is_dma_coherent(dev, attrs) && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); @@ -176,7 +253,7 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { - if (!is_device_dma_coherent(dev) && + if (!is_dma_coherent(dev, attrs) && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); swiotlb_unmap_page(dev, 
dev_addr, size, dir, attrs); @@ -190,7 +267,7 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int i, ret; ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs); - if (!is_device_dma_coherent(dev) && + if (!is_dma_coherent(dev, attrs) && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) for_each_sg(sgl, sg, ret, i) __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), @@ -207,7 +284,7 @@ static void __swiotlb_unmap_sg_attrs(struct device *dev, struct scatterlist *sg; int i; - if (!is_device_dma_coherent(dev) && + if (!is_dma_coherent(dev, attrs) && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) for_each_sg(sgl, sg, nelems, i) __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), @@ -284,11 +361,11 @@ static int __swiotlb_mmap(struct device *dev, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - int ret; + int ret = -ENXIO; unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT; vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, - is_device_dma_coherent(dev)); + is_dma_coherent(dev, attrs)); if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; @@ -447,7 +524,7 @@ static int __init atomic_pool_init(void) goto out; remove_mapping: - dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP); + dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP, false); destroy_genpool: gen_pool_destroy(atomic_pool); atomic_pool = NULL; @@ -468,6 +545,7 @@ static void *__dummy_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) { + WARN(1, "dma alloc failure, device may be missing a call to arch_setup_dma_ops"); return NULL; } @@ -582,7 +660,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, unsigned long attrs) { - bool coherent = is_device_dma_coherent(dev); + bool coherent = is_dma_coherent(dev, attrs); int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); size_t iosize = size; void *addr; @@ -596,7 +674,8 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, * Some drivers rely on this, and we probably don't want the * possibility of stale kernel data being read by devices anyway. 
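For context, a minimal driver-side sketch of how the allocation attributes handled above are typically requested. Only dma_alloc_attrs() and DMA_ATTR_NO_KERNEL_MAPPING are taken as given; the helper name and its caller are illustrative, and this code is not part of the patch.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * Allocate a buffer the CPU never touches (e.g. firmware- or device-only
 * memory).  With DMA_ATTR_NO_KERNEL_MAPPING the returned "CPU address" is
 * only a cookie to hand back to dma_free_attrs(); it must not be
 * dereferenced.
 */
static void *alloc_device_only_buffer(struct device *dev, size_t size,
				      dma_addr_t *iova)
{
	unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	return dma_alloc_attrs(dev, size, iova, GFP_KERNEL, attrs);
}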
*/ - gfp |= __GFP_ZERO; + if (!(attrs & DMA_ATTR_SKIP_ZEROING)) + gfp |= __GFP_ZERO; if (!gfpflags_allow_blocking(gfp)) { struct page *page; @@ -688,17 +767,16 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, __free_from_pool(cpu_addr, size); } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { struct page *page = vmalloc_to_page(cpu_addr); - iommu_dma_unmap_page(dev, handle, iosize, 0, attrs); dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); - dma_common_free_remap(cpu_addr, size, VM_USERMAP); - } else if (is_vmalloc_addr(cpu_addr)){ + dma_common_free_remap(cpu_addr, size, VM_USERMAP, false); + } else if (is_vmalloc_addr(cpu_addr)) { struct vm_struct *area = find_vm_area(cpu_addr); if (WARN_ON(!area || !area->pages)) return; iommu_dma_free(dev, area->pages, iosize, &handle); - dma_common_free_remap(cpu_addr, size, VM_USERMAP); + dma_common_free_remap(cpu_addr, size, VM_USERMAP, false); } else { iommu_dma_unmap_page(dev, handle, iosize, 0, 0); __free_pages(virt_to_page(cpu_addr), get_order(size)); @@ -711,32 +789,31 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, { struct vm_struct *area; int ret; + unsigned long pfn = 0; vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, - is_device_dma_coherent(dev)); + is_dma_coherent(dev, attrs)); if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; - if (!is_vmalloc_addr(cpu_addr)) { - unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); - return __swiotlb_mmap_pfn(vma, pfn, size); - } + area = find_vm_area(cpu_addr); - if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { + if (area && area->pages) + return iommu_dma_mmap(area->pages, size, vma); + else if (!is_vmalloc_addr(cpu_addr)) + pfn = page_to_pfn(virt_to_page(cpu_addr)); + else if (is_vmalloc_addr(cpu_addr)) /* - * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped, - * hence in the vmalloc space. + * DMA_ATTR_FORCE_CONTIGUOUS and atomic pool allocations are + * always remapped, hence in the vmalloc space. */ - unsigned long pfn = vmalloc_to_pfn(cpu_addr); - return __swiotlb_mmap_pfn(vma, pfn, size); - } + pfn = vmalloc_to_pfn(cpu_addr); - area = find_vm_area(cpu_addr); - if (WARN_ON(!area || !area->pages)) - return -ENXIO; + if (pfn) + return __swiotlb_mmap_pfn(vma, pfn, size); - return iommu_dma_mmap(area->pages, size, vma); + return -ENXIO; } static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt, @@ -744,27 +821,24 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt, size_t size, unsigned long attrs) { unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; + struct page *page = NULL; struct vm_struct *area = find_vm_area(cpu_addr); - if (!is_vmalloc_addr(cpu_addr)) { - struct page *page = virt_to_page(cpu_addr); - return __swiotlb_get_sgtable_page(sgt, page, size); - } - - if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { + if (area && area->pages) + return sg_alloc_table_from_pages(sgt, area->pages, count, 0, + size, GFP_KERNEL); + else if (!is_vmalloc_addr(cpu_addr)) + page = virt_to_page(cpu_addr); + else if (is_vmalloc_addr(cpu_addr)) /* - * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped, - * hence in the vmalloc space. + * DMA_ATTR_FORCE_CONTIGUOUS and atomic pool allocations + * are always remapped, hence in the vmalloc space. 
*/ - struct page *page = vmalloc_to_page(cpu_addr); - return __swiotlb_get_sgtable_page(sgt, page, size); - } + page = vmalloc_to_page(cpu_addr); - if (WARN_ON(!area || !area->pages)) - return -ENXIO; - - return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size, - GFP_KERNEL); + if (page) + return __swiotlb_get_sgtable_page(sgt, page, size); + return -ENXIO; } static void __iommu_sync_single_for_cpu(struct device *dev, @@ -772,11 +846,12 @@ static void __iommu_sync_single_for_cpu(struct device *dev, enum dma_data_direction dir) { phys_addr_t phys; + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - if (is_device_dma_coherent(dev)) + if (!domain || iommu_is_iova_coherent(domain, dev_addr)) return; - phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr); + phys = iommu_iova_to_phys(domain, dev_addr); __dma_unmap_area(phys_to_virt(phys), size, dir); } @@ -785,11 +860,12 @@ static void __iommu_sync_single_for_device(struct device *dev, enum dma_data_direction dir) { phys_addr_t phys; + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - if (is_device_dma_coherent(dev)) + if (!domain || iommu_is_iova_coherent(domain, dev_addr)) return; - phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr); + phys = iommu_iova_to_phys(domain, dev_addr); __dma_map_area(phys_to_virt(phys), size, dir); } @@ -798,7 +874,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, enum dma_data_direction dir, unsigned long attrs) { - bool coherent = is_device_dma_coherent(dev); + bool coherent = is_dma_coherent(dev, attrs); int prot = dma_info_to_prot(dir, coherent, attrs); dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); @@ -824,9 +900,11 @@ static void __iommu_sync_sg_for_cpu(struct device *dev, enum dma_data_direction dir) { struct scatterlist *sg; + dma_addr_t iova = sg_dma_address(sgl); + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); int i; - if (is_device_dma_coherent(dev)) + if (!domain || iommu_is_iova_coherent(domain, iova)) return; for_each_sg(sgl, sg, nelems, i) @@ -838,9 +916,11 @@ static void __iommu_sync_sg_for_device(struct device *dev, enum dma_data_direction dir) { struct scatterlist *sg; + dma_addr_t iova = sg_dma_address(sgl); + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); int i; - if (is_device_dma_coherent(dev)) + if (!domain || iommu_is_iova_coherent(domain, iova)) return; for_each_sg(sgl, sg, nelems, i) @@ -851,13 +931,18 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, unsigned long attrs) { - bool coherent = is_device_dma_coherent(dev); + bool coherent = is_dma_coherent(dev, attrs); + int ret; + + ret = iommu_dma_map_sg(dev, sgl, nelems, + dma_info_to_prot(dir, coherent, attrs)); + if (!ret) + return ret; if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) __iommu_sync_sg_for_device(dev, sgl, nelems, dir); - return iommu_dma_map_sg(dev, sgl, nelems, - dma_info_to_prot(dir, coherent, attrs)); + return ret; } static void __iommu_unmap_sg_attrs(struct device *dev, @@ -895,50 +980,14 @@ static int __init __iommu_dma_init(void) } arch_initcall(__iommu_dma_init); -static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - const struct iommu_ops *ops) -{ - struct iommu_domain *domain; - - if (!ops) - return; - - /* - * The IOMMU core code allocates the default DMA domain, which the - * underlying IOMMU driver needs to support via the dma-iommu layer. 
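As a reference point, a generic streaming-DMA receive sequence of the kind that ends up in the __iommu_sync_* hooks above. This is a sketch of standard DMA-API usage under assumed buffer ownership rules, not code from this series.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int rx_one_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t iova;

	iova = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, iova))
		return -ENOMEM;

	/* ... device DMAs into the buffer here ... */

	/* Make the device's writes visible before the CPU reads them. */
	dma_sync_single_for_cpu(dev, iova, len, DMA_FROM_DEVICE);

	/* ... CPU consumes buf ... */

	dma_unmap_single(dev, iova, len, DMA_FROM_DEVICE);
	return 0;
}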
- */ - domain = iommu_get_domain_for_dev(dev); - - if (!domain) - goto out_err; - - if (domain->type == IOMMU_DOMAIN_DMA) { - if (iommu_dma_init_domain(domain, dma_base, size, dev)) - goto out_err; - - dev->dma_ops = &iommu_dma_ops; - } - - return; - -out_err: - pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", - dev_name(dev)); -} - void arch_teardown_dma_ops(struct device *dev) { dev->dma_ops = NULL; } - -#else - -static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - const struct iommu_ops *iommu) -{ } - #endif /* CONFIG_IOMMU_DMA */ +static void arm_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size); + void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) { @@ -950,7 +999,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, } dev->archdata.dma_coherent = coherent; - __iommu_setup_dma_ops(dev, dma_base, size, iommu); + arm_iommu_setup_dma_ops(dev, dma_base, size); #ifdef CONFIG_XEN if (xen_initial_domain()) { @@ -960,3 +1009,149 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, #endif } EXPORT_SYMBOL_GPL(arch_setup_dma_ops); + +#ifdef CONFIG_ARM64_DMA_USE_IOMMU + +/* guards initialization of default_domain->iova_cookie */ +static DEFINE_MUTEX(iommu_dma_init_mutex); + +static int +iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping) +{ + struct iommu_domain *domain = mapping->domain; + dma_addr_t dma_base = mapping->base; + u64 size = mapping->bits << PAGE_SHIFT; + int ret; + bool own_cookie; + + /* + * if own_cookie is false, then we are sharing the iova_cookie with + * another driver, and should not free it on error. Cleanup will be + * done when the iommu_domain is freed. + */ + own_cookie = !domain->iova_cookie; + + if (own_cookie) { + ret = iommu_get_dma_cookie(domain); + if (ret) { + dev_err(dev, "iommu_get_dma_cookie failed: %d\n", ret); + return ret; + } + } + + ret = iommu_dma_init_domain(domain, dma_base, size, dev); + if (ret) { + dev_err(dev, "iommu_dma_init_domain failed: %d\n", ret); + if (own_cookie) + iommu_put_dma_cookie(domain); + return ret; + } + + mapping->ops = &iommu_dma_ops; + return 0; +} + +static int arm_iommu_get_dma_cookie(struct device *dev, + struct dma_iommu_mapping *mapping) +{ + int s1_bypass = 0, is_fast = 0; + int err = 0; + + mutex_lock(&iommu_dma_init_mutex); + + iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS, + &s1_bypass); + iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast); + + if (s1_bypass) + mapping->ops = &arm64_swiotlb_dma_ops; + else if (is_fast) + err = fast_smmu_init_mapping(dev, mapping); + else + err = iommu_init_mapping(dev, mapping); + + mutex_unlock(&iommu_dma_init_mutex); + return err; +} + +/* + * Checks for "qcom,iommu-dma-addr-pool" property. + * If not present, leaves dma_addr and dma_size unmodified. 
+ */ +static void arm_iommu_get_dma_window(struct device *dev, u64 *dma_addr, + u64 *dma_size) +{ + struct device_node *np; + int naddr, nsize, len; + const __be32 *ranges; + + if (!dev->of_node) + return; + + np = of_parse_phandle(dev->of_node, "qcom,iommu-group", 0); + if (!np) + np = dev->of_node; + + ranges = of_get_property(np, "qcom,iommu-dma-addr-pool", &len); + if (!ranges) + return; + + len /= sizeof(u32); + naddr = of_n_addr_cells(np); + nsize = of_n_size_cells(np); + if (len < naddr + nsize) { + dev_err(dev, "Invalid length for qcom,iommu-dma-addr-pool, expected %d cells\n", + naddr + nsize); + return; + } + if (naddr == 0 || nsize == 0) { + dev_err(dev, "Invalid #address-cells %d or #size-cells %d\n", + naddr, nsize); + return; + } + + *dma_addr = of_read_number(ranges, naddr); + *dma_size = of_read_number(ranges + naddr, nsize); +} + +static void arm_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size) +{ + struct iommu_domain *domain; + struct iommu_group *group; + struct dma_iommu_mapping mapping = {0}; + + group = dev->iommu_group; + if (!group) + return; + + arm_iommu_get_dma_window(dev, &dma_base, &size); + + domain = iommu_get_domain_for_dev(dev); + if (!domain) + return; + + /* Allow iommu-debug to call arch_setup_dma_ops to reconfigure itself */ + if (domain->type != IOMMU_DOMAIN_DMA && + !of_device_is_compatible(dev->of_node, "iommu-debug-test")) { + dev_err(dev, "Invalid iommu domain type!\n"); + return; + } + + mapping.base = dma_base; + mapping.bits = size >> PAGE_SHIFT; + mapping.domain = domain; + + if (arm_iommu_get_dma_cookie(dev, &mapping)) { + dev_err(dev, "Failed to get dma cookie\n"); + return; + } + + set_dma_ops(dev, mapping.ops); +} + +#else /*!CONFIG_ARM64_DMA_USE_IOMMU */ + +static void arm_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size) +{ +} +#endif diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index f58ea503ad014fda52fbab06e6edc743551a4b6c..1d765676131666a30db9b5f1b27b7d43f93dcd8e 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -218,6 +218,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, ptep = (pte_t *)pudp; } else if (sz == (PAGE_SIZE * CONT_PTES)) { pmdp = pmd_alloc(mm, pudp, addr); + if (!pmdp) + return NULL; WARN_ON(addr & (sz - 1)); /* diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 29d2f425806e3ff181d82c65fc6c6ab944599991..65efa46cf0af072dfc4384b7ef54a72ef171413c 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -53,6 +53,8 @@ #include #include +EXPORT_SYMBOL_GPL(kimage_vaddr); + /* * We need to be able to catch inadvertent references to memstart_addr * that occur (potentially in generic code) before arm64_memblock_init() diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 222fd54233e8188284f57b239d26a4bbd64c6cdd..29238f6502a394aa3a4ebfebfd7187fd6c91a2b2 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -32,6 +32,8 @@ #include #include #include +#include +#include #include #include @@ -67,6 +69,40 @@ static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused; static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused; +struct dma_contig_early_reserve { + phys_addr_t base; + unsigned long size; +}; + +static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS]; +static int dma_mmu_remap_num; + +void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) +{ + if (dma_mmu_remap_num >= ARRAY_SIZE(dma_mmu_remap)) { + 
pr_err("ARM64: Not enough slots for DMA fixup reserved regions!\n"); + return; + } + dma_mmu_remap[dma_mmu_remap_num].base = base; + dma_mmu_remap[dma_mmu_remap_num].size = size; + dma_mmu_remap_num++; +} + +static bool dma_overlap(phys_addr_t start, phys_addr_t end) +{ + int i; + + for (i = 0; i < dma_mmu_remap_num; i++) { + phys_addr_t dma_base = dma_mmu_remap[i].base; + phys_addr_t dma_end = dma_mmu_remap[i].base + + dma_mmu_remap[i].size; + + if ((dma_base < end) && (dma_end > start)) + return true; + } + return false; +} + pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { @@ -200,7 +236,8 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, /* try section mapping first */ if (((addr | next | phys) & ~SECTION_MASK) == 0 && - (flags & NO_BLOCK_MAPPINGS) == 0) { + (flags & NO_BLOCK_MAPPINGS) == 0 && + !dma_overlap(phys, phys + next - addr)) { pmd_set_huge(pmdp, phys, prot); /* @@ -299,7 +336,8 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, * For 4K granule only, attempt to put down a 1GB block */ if (use_1G_block(addr, next, phys) && - (flags & NO_BLOCK_MAPPINGS) == 0) { + (flags & NO_BLOCK_MAPPINGS) == 0 && + !dma_overlap(phys, phys + next - addr)) { pud_set_huge(pudp, phys, prot); /* @@ -720,6 +758,7 @@ int kern_addr_valid(unsigned long addr) return pfn_valid(pte_pfn(pte)); } +EXPORT_SYMBOL_GPL(kern_addr_valid); #ifdef CONFIG_SPARSEMEM_VMEMMAP #if !ARM64_SWAPPER_USES_SECTION_MAPS int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h index e17262ad125e7ee0c48be297dfff08dc50f2d5e6..bd429d6a3163c0d4d165cc94b9d12b9e6f211e7f 100644 --- a/arch/hexagon/include/asm/io.h +++ b/arch/hexagon/include/asm/io.h @@ -186,16 +186,10 @@ static inline void writel(u32 data, volatile void __iomem *addr) #define mmiowb() -/* - * Need an mtype somewhere in here, for cache type deals? - * This is probably too long for an inline. 
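A minimal sketch of the usage kept working by the ioremap changes below: callers that previously used ioremap_nocache() can call ioremap() directly. The register window address, size and value are made up; this is not part of the patch.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Map a 4KiB register window, write one register, tear it down again. */
static int poke_device(unsigned long phys, u32 val)
{
	void __iomem *regs = ioremap(phys, 0x1000);

	if (!regs)
		return -ENOMEM;

	writel(val, regs);
	iounmap(regs);
	return 0;
}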
- */ -void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size); +void __iomem *ioremap(unsigned long phys_addr, unsigned long size); +#define ioremap_nocache ioremap +#define ioremap_uc(X, Y) ioremap((X), (Y)) -static inline void __iomem *ioremap(unsigned long phys_addr, unsigned long size) -{ - return ioremap_nocache(phys_addr, size); -} static inline void iounmap(volatile void __iomem *addr) { diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c index aa248f59543194d3f99f3be6f041e262d8e5351f..d13d4a06ee383ea744d777354f9b8a9185ad4a2a 100644 --- a/arch/hexagon/kernel/hexagon_ksyms.c +++ b/arch/hexagon/kernel/hexagon_ksyms.c @@ -33,7 +33,7 @@ EXPORT_SYMBOL(__vmgetie); EXPORT_SYMBOL(__vmsetie); EXPORT_SYMBOL(__vmyield); EXPORT_SYMBOL(empty_zero_page); -EXPORT_SYMBOL(ioremap_nocache); +EXPORT_SYMBOL(ioremap); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); diff --git a/arch/hexagon/mm/ioremap.c b/arch/hexagon/mm/ioremap.c index d27d67224046a9c87d6c5411d465e17ee104afc5..370ade265e584a73f945fa6219013420657f2b21 100644 --- a/arch/hexagon/mm/ioremap.c +++ b/arch/hexagon/mm/ioremap.c @@ -22,7 +22,7 @@ #include #include -void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size) +void __iomem *ioremap(unsigned long phys_addr, unsigned long size) { unsigned long last_addr, addr; unsigned long offset = phys_addr & ~PAGE_MASK; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index a830a9701e501a8c73b1db9dec07660a3ac5e007..f020bec96265b012bc08ec48e5d211584768600f 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -21,6 +21,7 @@ config MIPS select GENERIC_CLOCKEVENTS select GENERIC_CMOS_UPDATE select GENERIC_CPU_AUTOPROBE + select GENERIC_GETTIMEOFDAY select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_LIB_ASHLDI3 @@ -71,6 +72,7 @@ config MIPS select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP + select HAVE_GENERIC_VDSO select IRQ_FORCED_THREADING select MODULES_USE_ELF_RELA if MODULES && 64BIT select MODULES_USE_ELF_REL if MODULES diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 8272d8c648ca9fd4f218093212c89ad5245f4eb8..43e4fc1b373ca37809698f29a1ce0f61615f623d 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -2199,6 +2199,9 @@ static int octeon_irq_cib_map(struct irq_domain *d, } cd = kzalloc(sizeof(*cd), GFP_KERNEL); + if (!cd) + return -ENOMEM; + cd->host_data = host_data; cd->bit = hw; diff --git a/arch/mips/include/asm/clocksource.h b/arch/mips/include/asm/clocksource.h index 3deb1d0c1a94ceeb94bed27a053f79abfc20bb4c..7ab2c220cfb51a68aa5d813511b66b2a1e254bef 100644 --- a/arch/mips/include/asm/clocksource.h +++ b/arch/mips/include/asm/clocksource.h @@ -11,19 +11,6 @@ #ifndef __ASM_CLOCKSOURCE_H #define __ASM_CLOCKSOURCE_H -#include - -/* VDSO clocksources. */ -#define VDSO_CLOCK_NONE 0 /* No suitable clocksource. */ -#define VDSO_CLOCK_R4K 1 /* Use the coprocessor 0 count. */ -#define VDSO_CLOCK_GIC 2 /* Use the GIC. */ - -/** - * struct arch_clocksource_data - Architecture-specific clocksource information. - * @vdso_clock_mode: Method the VDSO should use to access the clocksource. 
- */ -struct arch_clocksource_data { - u8 vdso_clock_mode; -}; +#include #endif /* __ASM_CLOCKSOURCE_H */ diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index c373eb605040246ad67e332f72bc6de8aee0dfa8..d0f7edc40ff72a3ee2775475b9dda9274a9f5e0d 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -22,6 +22,7 @@ #include #include #include +#include /* * Return current * instruction pointer ("program counter"). @@ -386,21 +387,6 @@ unsigned long get_wchan(struct task_struct *p); #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29]) #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status) -#ifdef CONFIG_CPU_LOONGSON3 -/* - * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a - * tight read loop is executed, because reads take priority over writes & the - * hardware (incorrectly) doesn't ensure that writes will eventually occur. - * - * Since spin loops of any kind should have a cpu_relax() in them, force an SFB - * flush from cpu_relax() such that any pending writes will become visible as - * expected. - */ -#define cpu_relax() smp_mb() -#else -#define cpu_relax() barrier() -#endif - /* * Return_address is a replacement for __builtin_return_address(count) * which on certain architectures cannot reasonably be implemented in GCC diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h index 91bf0c2c265cbbe3e2e32ea596dabe22f8aa8662..285241da2284a1c24fdde0aee24161f5b495ad36 100644 --- a/arch/mips/include/asm/vdso.h +++ b/arch/mips/include/asm/vdso.h @@ -12,6 +12,7 @@ #define __ASM_VDSO_H #include +#include #include @@ -53,84 +54,9 @@ extern struct mips_vdso_image vdso_image_o32; extern struct mips_vdso_image vdso_image_n32; #endif -/** - * union mips_vdso_data - Data provided by the kernel for the VDSO. - * @xtime_sec: Current real time (seconds part). - * @xtime_nsec: Current real time (nanoseconds part, shifted). - * @wall_to_mono_sec: Wall-to-monotonic offset (seconds part). - * @wall_to_mono_nsec: Wall-to-monotonic offset (nanoseconds part). - * @seq_count: Counter to synchronise updates (odd = updating). - * @cs_shift: Clocksource shift value. - * @clock_mode: Clocksource to use for time functions. - * @cs_mult: Clocksource multiplier value. - * @cs_cycle_last: Clock cycle value at last update. - * @cs_mask: Clocksource mask value. - * @tz_minuteswest: Minutes west of Greenwich (from timezone). - * @tz_dsttime: Type of DST correction (from timezone). - * - * This structure contains data needed by functions within the VDSO. It is - * populated by the kernel and mapped read-only into user memory. The time - * fields are mirrors of internal data from the timekeeping infrastructure. - * - * Note: Care should be taken when modifying as the layout must remain the same - * for both 64- and 32-bit (for 32-bit userland on 64-bit kernel). - */ union mips_vdso_data { - struct { - u64 xtime_sec; - u64 xtime_nsec; - u64 wall_to_mono_sec; - u64 wall_to_mono_nsec; - u32 seq_count; - u32 cs_shift; - u8 clock_mode; - u32 cs_mult; - u64 cs_cycle_last; - u64 cs_mask; - s32 tz_minuteswest; - s32 tz_dsttime; - }; - + struct vdso_data data[CS_BASES]; u8 page[PAGE_SIZE]; }; -static inline u32 vdso_data_read_begin(const union mips_vdso_data *data) -{ - u32 seq; - - while (true) { - seq = READ_ONCE(data->seq_count); - if (likely(!(seq & 1))) { - /* Paired with smp_wmb() in vdso_data_write_*(). 
*/ - smp_rmb(); - return seq; - } - - cpu_relax(); - } -} - -static inline bool vdso_data_read_retry(const union mips_vdso_data *data, - u32 start_seq) -{ - /* Paired with smp_wmb() in vdso_data_write_*(). */ - smp_rmb(); - return unlikely(data->seq_count != start_seq); -} - -static inline void vdso_data_write_begin(union mips_vdso_data *data) -{ - ++data->seq_count; - - /* Ensure sequence update is written before other data page values. */ - smp_wmb(); -} - -static inline void vdso_data_write_end(union mips_vdso_data *data) -{ - /* Ensure data values are written before updating sequence again. */ - smp_wmb(); - ++data->seq_count; -} - #endif /* __ASM_VDSO_H */ diff --git a/arch/mips/include/asm/vdso/clocksource.h b/arch/mips/include/asm/vdso/clocksource.h new file mode 100644 index 0000000000000000000000000000000000000000..8931462ae136338efd76526135dbfdd1e54bdd4b --- /dev/null +++ b/arch/mips/include/asm/vdso/clocksource.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef __ASM_VDSOCLOCKSOURCE_H +#define __ASM_VDSOCLOCKSOURCE_H + +#include + +/* VDSO clocksources. */ +#define VDSO_CLOCK_NONE 0 /* No suitable clocksource. */ +#define VDSO_CLOCK_R4K 1 /* Use the coprocessor 0 count. */ +#define VDSO_CLOCK_GIC 2 /* Use the GIC. */ + +/** + * struct arch_clocksource_data - Architecture-specific clocksource information. + * @vdso_clock_mode: Method the VDSO should use to access the clocksource. + */ +struct arch_clocksource_data { + u8 vdso_clock_mode; +}; + +#endif /* __ASM_VDSOCLOCKSOURCE_H */ diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h new file mode 100644 index 0000000000000000000000000000000000000000..7e588f1242c872f15f11f5ca87cfb5749709762c --- /dev/null +++ b/arch/mips/include/asm/vdso/gettimeofday.h @@ -0,0 +1,207 @@ +/* + * Copyright (C) 2018 ARM Limited + * Copyright (C) 2015 Imagination Technologies + * Author: Alex Smith + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __ASM_VDSO_GETTIMEOFDAY_H +#define __ASM_VDSO_GETTIMEOFDAY_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +#define VDSO_HAS_CLOCK_GETRES 1 + +#define __VDSO_USE_SYSCALL ULLONG_MAX + +static __always_inline long gettimeofday_fallback( + struct __kernel_old_timeval *_tv, + struct timezone *_tz) +{ + register struct timezone *tz asm("a1") = _tz; + register struct __kernel_old_timeval *tv asm("a0") = _tv; + register long ret asm("v0"); + register long nr asm("v0") = __NR_gettimeofday; + register long error asm("a3"); + + asm volatile( + " syscall\n" + : "=r" (ret), "=r" (error) + : "r" (tv), "r" (tz), "r" (nr) + : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", + "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + + return error ? 
-ret : ret; +} + +static __always_inline long clock_gettime_fallback( + clockid_t _clkid, + struct __kernel_timespec *_ts) +{ + register struct __kernel_timespec *ts asm("a1") = _ts; + register clockid_t clkid asm("a0") = _clkid; + register long ret asm("v0"); +#if _MIPS_SIM == _MIPS_SIM_ABI64 + register long nr asm("v0") = __NR_clock_gettime; +#else + register long nr asm("v0") = __NR_clock_gettime64; +#endif + register long error asm("a3"); + + asm volatile( + " syscall\n" + : "=r" (ret), "=r" (error) + : "r" (clkid), "r" (ts), "r" (nr) + : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", + "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + + return error ? -ret : ret; +} + +static __always_inline int clock_getres_fallback( + clockid_t _clkid, + struct __kernel_timespec *_ts) +{ + register struct __kernel_timespec *ts asm("a1") = _ts; + register clockid_t clkid asm("a0") = _clkid; + register long ret asm("v0"); +#if _MIPS_SIM == _MIPS_SIM_ABI64 + register long nr asm("v0") = __NR_clock_getres; +#else + register long nr asm("v0") = __NR_clock_getres_time64; +#endif + register long error asm("a3"); + + asm volatile( + " syscall\n" + : "=r" (ret), "=r" (error) + : "r" (clkid), "r" (ts), "r" (nr) + : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", + "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + + return error ? -ret : ret; +} + +#if _MIPS_SIM != _MIPS_SIM_ABI64 + +#define VDSO_HAS_32BIT_FALLBACK 1 + +static __always_inline long clock_gettime32_fallback( + clockid_t _clkid, + struct old_timespec32 *_ts) +{ + register struct old_timespec32 *ts asm("a1") = _ts; + register clockid_t clkid asm("a0") = _clkid; + register long ret asm("v0"); + register long nr asm("v0") = __NR_clock_gettime; + register long error asm("a3"); + + asm volatile( + " syscall\n" + : "=r" (ret), "=r" (error) + : "r" (clkid), "r" (ts), "r" (nr) + : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", + "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + + return error ? -ret : ret; +} + +static __always_inline int clock_getres32_fallback( + clockid_t _clkid, + struct old_timespec32 *_ts) +{ + register struct old_timespec32 *ts asm("a1") = _ts; + register clockid_t clkid asm("a0") = _clkid; + register long ret asm("v0"); + register long nr asm("v0") = __NR_clock_getres; + register long error asm("a3"); + + asm volatile( + " syscall\n" + : "=r" (ret), "=r" (error) + : "r" (clkid), "r" (ts), "r" (nr) + : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", + "$14", "$15", "$24", "$25", "hi", "lo", "memory"); + + return error ? 
-ret : ret; +} +#endif + +#ifdef CONFIG_CSRC_R4K + +static __always_inline u64 read_r4k_count(void) +{ + unsigned int count; + + __asm__ __volatile__( + " .set push\n" + " .set mips32r2\n" + " rdhwr %0, $2\n" + " .set pop\n" + : "=r" (count)); + + return count; +} + +#endif + +#ifdef CONFIG_CLKSRC_MIPS_GIC + +static __always_inline u64 read_gic_count(const struct vdso_data *data) +{ + void __iomem *gic = get_gic(data); + u32 hi, hi2, lo; + + do { + hi = __raw_readl(gic + sizeof(lo)); + lo = __raw_readl(gic); + hi2 = __raw_readl(gic + sizeof(lo)); + } while (hi2 != hi); + + return (((u64)hi) << 32) + lo; +} + +#endif + +static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) +{ +#ifdef CONFIG_CLKSRC_MIPS_GIC + const struct vdso_data *data = get_vdso_data(); +#endif + u64 cycle_now; + + switch (clock_mode) { +#ifdef CONFIG_CSRC_R4K + case VDSO_CLOCK_R4K: + cycle_now = read_r4k_count(); + break; +#endif +#ifdef CONFIG_CLKSRC_MIPS_GIC + case VDSO_CLOCK_GIC: + cycle_now = read_gic_count(data); + break; +#endif + default: + cycle_now = __VDSO_USE_SYSCALL; + break; + } + + return cycle_now; +} + +static __always_inline const struct vdso_data *__arch_get_vdso_data(void) +{ + return get_vdso_data(); +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/mips/include/asm/vdso/processor.h b/arch/mips/include/asm/vdso/processor.h new file mode 100644 index 0000000000000000000000000000000000000000..511c95d735e654d7aac3ce1ae89ce87ee4c1adcc --- /dev/null +++ b/arch/mips/include/asm/vdso/processor.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 ARM Ltd. + */ +#ifndef __ASM_VDSO_PROCESSOR_H +#define __ASM_VDSO_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_CPU_LOONGSON64 +/* + * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a + * tight read loop is executed, because reads take priority over writes & the + * hardware (incorrectly) doesn't ensure that writes will eventually occur. + * + * Since spin loops of any kind should have a cpu_relax() in them, force an SFB + * flush from cpu_relax() such that any pending writes will become visible as + * expected. 
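A generic illustration of the spin-wait pattern the Loongson note above is about; the flag variable and helper are hypothetical, not taken from this patch.

#include <linux/compiler.h>
#include <asm/processor.h>

/*
 * Without the barrier in cpu_relax(), Loongson-3's store-fill-buffer
 * could keep deferring the store this loop is waiting to observe.
 */
static void wait_for_flag(int *flag)
{
	while (!READ_ONCE(*flag))
		cpu_relax();
}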
+ */ +#define cpu_relax() smp_mb() +#else +#define cpu_relax() barrier() +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_VDSO_PROCESSOR_H */ diff --git a/arch/mips/vdso/vdso.h b/arch/mips/include/asm/vdso/vdso.h similarity index 90% rename from arch/mips/vdso/vdso.h rename to arch/mips/include/asm/vdso/vdso.h index cfb1be441deccbd5b9e35c2e008f3a50c5c4bc74..048d12fbb925aef2c0331695bce49a531477cf74 100644 --- a/arch/mips/vdso/vdso.h +++ b/arch/mips/include/asm/vdso/vdso.h @@ -72,14 +72,14 @@ static inline unsigned long get_vdso_base(void) return addr; } -static inline const union mips_vdso_data *get_vdso_data(void) +static inline const struct vdso_data *get_vdso_data(void) { - return (const union mips_vdso_data *)(get_vdso_base() - PAGE_SIZE); + return (const struct vdso_data *)(get_vdso_base() - PAGE_SIZE); } #ifdef CONFIG_CLKSRC_MIPS_GIC -static inline void __iomem *get_gic(const union mips_vdso_data *data) +static inline void __iomem *get_gic(const struct vdso_data *data) { return (void __iomem *)data - PAGE_SIZE; } diff --git a/arch/mips/include/asm/vdso/vsyscall.h b/arch/mips/include/asm/vdso/vsyscall.h new file mode 100644 index 0000000000000000000000000000000000000000..195314732233fe3eaf949e53648302eea6b66d06 --- /dev/null +++ b/arch/mips/include/asm/vdso/vsyscall.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSO_VSYSCALL_H +#define __ASM_VDSO_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#include +#include + +extern struct vdso_data *vdso_data; + +/* + * Update the vDSO data page to keep in sync with kernel timekeeping. + */ +static __always_inline +struct vdso_data *__mips_get_k_vdso_data(void) +{ + return vdso_data; +} +#define __arch_get_k_vdso_data __mips_get_k_vdso_data + +static __always_inline +int __mips_get_clock_mode(struct timekeeper *tk) +{ + u32 clock_mode = tk->tkr_mono.clock->archdata.vdso_clock_mode; + + return clock_mode; +} +#define __arch_get_clock_mode __mips_get_clock_mode + +static __always_inline +int __mips_use_vsyscall(struct vdso_data *vdata) +{ + return (vdata[CS_HRES_COARSE].clock_mode != VDSO_CLOCK_NONE); +} +#define __arch_use_vsyscall __mips_use_vsyscall + +/* The asm-generic header needs to be included after the definitions above */ +#include + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_VSYSCALL_H */ diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 9df3ebdc7b0f7164730b4211fa2c62c7aaeec312..157ee8045035189c59e4bb1b8767286bf950a944 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -24,9 +24,12 @@ #include #include #include +#include +#include /* Kernel-provided data used by the VDSO. */ -static union mips_vdso_data vdso_data __page_aligned_data; +static union mips_vdso_data mips_vdso_data __page_aligned_data; +struct vdso_data *vdso_data = mips_vdso_data.data; /* * Mapping for the VDSO data/GIC pages. 
The real pages are mapped manually, as @@ -70,34 +73,6 @@ static int __init init_vdso(void) } subsys_initcall(init_vdso); -void update_vsyscall(struct timekeeper *tk) -{ - vdso_data_write_begin(&vdso_data); - - vdso_data.xtime_sec = tk->xtime_sec; - vdso_data.xtime_nsec = tk->tkr_mono.xtime_nsec; - vdso_data.wall_to_mono_sec = tk->wall_to_monotonic.tv_sec; - vdso_data.wall_to_mono_nsec = tk->wall_to_monotonic.tv_nsec; - vdso_data.cs_shift = tk->tkr_mono.shift; - - vdso_data.clock_mode = tk->tkr_mono.clock->archdata.vdso_clock_mode; - if (vdso_data.clock_mode != VDSO_CLOCK_NONE) { - vdso_data.cs_mult = tk->tkr_mono.mult; - vdso_data.cs_cycle_last = tk->tkr_mono.cycle_last; - vdso_data.cs_mask = tk->tkr_mono.mask; - } - - vdso_data_write_end(&vdso_data); -} - -void update_vsyscall_tz(void) -{ - if (vdso_data.clock_mode != VDSO_CLOCK_NONE) { - vdso_data.tz_minuteswest = sys_tz.tz_minuteswest; - vdso_data.tz_dsttime = sys_tz.tz_dsttime; - } -} - static unsigned long vdso_base(void) { unsigned long base; @@ -167,7 +142,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) */ if (cpu_has_dc_aliases) { base = __ALIGN_MASK(base, shm_align_mask); - base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask; + base += ((unsigned long)vdso_data - gic_size) & shm_align_mask; } data_addr = base + gic_size; @@ -193,7 +168,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) /* Map data page. */ ret = remap_pfn_range(vma, data_addr, - virt_to_phys(&vdso_data) >> PAGE_SHIFT, + virt_to_phys(vdso_data) >> PAGE_SHIFT, PAGE_SIZE, PAGE_READONLY); if (ret) goto out; diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 3944c49eee0c4c2ebb77e62a686e05efd3e37399..620abc96862499d345c2a7e09466b7ab10d4b9ab 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -1479,6 +1479,7 @@ static void build_r4000_tlb_refill_handler(void) static void setup_pw(void) { + unsigned int pwctl; unsigned long pgd_i, pgd_w; #ifndef __PAGETABLE_PMD_FOLDED unsigned long pmd_i, pmd_w; @@ -1505,6 +1506,7 @@ static void setup_pw(void) pte_i = ilog2(_PAGE_GLOBAL); pte_w = 0; + pwctl = 1 << 30; /* Set PWDirExt */ #ifndef __PAGETABLE_PMD_FOLDED write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i); @@ -1515,8 +1517,9 @@ static void setup_pw(void) #endif #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT - write_c0_pwctl(1 << 6 | psn); + pwctl |= (1 << 6 | psn); #endif + write_c0_pwctl(pwctl); write_c0_kpgd((long)swapper_pg_dir); kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */ } diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index c99fa1c1bd9ccec73423197edc3a5c201ca0aa3e..920ec7caee96e46f3c6ca818afb34e7f73e59114 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -1,6 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 # Objects to go into the VDSO. -obj-vdso-y := elf.o gettimeofday.o sigreturn.o + +# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before +# the inclusion of generic Makefile. +ARCH_REL_TYPE_ABS := R_MIPS_JUMP_SLOT|R_MIPS_GLOB_DAT +include $(srctree)/lib/vdso/Makefile + +obj-vdso-y := elf.o vgettimeofday.o sigreturn.o # Common compiler flags between ABIs. ccflags-vdso := \ @@ -16,15 +22,31 @@ ifeq ($(cc-name),clang) ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS)) endif +# +# The -fno-jump-tables flag only prevents the compiler from generating +# jump tables but does not prevent the compiler from emitting absolute +# offsets. 
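A quick userspace check of the fast path this series enables, assuming the resulting vDSO is installed on the target. The test program is illustrative and not part of the patch.

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/*
	 * With the generic vDSO in place this call is satisfied by
	 * __vdso_clock_gettime(); under strace no clock_gettime syscall
	 * should appear on the hot path.
	 */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
		return 1;

	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}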
cflags-vdso := $(ccflags-vdso) \ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ - -O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \ - -DDISABLE_BRANCH_PROFILING \ + -O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \ + -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \ $(call cc-option, -fno-asynchronous-unwind-tables) \ $(call cc-option, -fno-stack-protector) aflags-vdso := $(ccflags-vdso) \ -D__ASSEMBLY__ -Wa,-gdwarf-2 +ifneq ($(c-gettimeofday-y),) +CFLAGS_vgettimeofday.o = -include $(c-gettimeofday-y) + +# config-n32-o32-env.c prepares the environment to build a 32bit vDSO +# library on a 64bit kernel. +# Note: Needs to be included before than the generic library. +CFLAGS_vgettimeofday-o32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -include $(c-gettimeofday-y) +CFLAGS_vgettimeofday-n32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -include $(c-gettimeofday-y) +endif + +CFLAGS_REMOVE_vgettimeofday.o = -pg + # # For the pre-R6 code in arch/mips/vdso/vdso.h for locating # the base address of VDSO, the linker will emit a R_MIPS_PC32 @@ -51,17 +73,22 @@ VDSO_LDFLAGS := \ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \ $(call cc-ldoption, -Wl$(comma)--build-id) +CFLAGS_REMOVE_vdso.o = -pg + GCOV_PROFILE := n # # Shared build commands. # +quiet_cmd_vdsold_and_vdso_check = LD $@ + cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check) + quiet_cmd_vdsold = VDSO $@ cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \ -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@ -quiet_cmd_vdsoas_o_S = AS $@ +quiet_cmd_vdsoas_o_S = AS $@ cmd_vdsoas_o_S = $(CC) $(a_flags) -c -o $@ $< # Strip rule for the raw .so files @@ -97,7 +124,7 @@ $(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi) $(obj)/vdso.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) $(native-abi) $(obj)/vdso.so.dbg.raw: $(obj)/vdso.lds $(obj-vdso) FORCE - $(call if_changed,vdsold) + $(call if_changed,vdsold_and_vdso_check) $(obj)/vdso-image.c: $(obj)/vdso.so.dbg.raw $(obj)/vdso.so.raw \ $(obj)/genvdso FORCE @@ -135,7 +162,7 @@ $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE $(call if_changed_dep,cpp_lds_S) $(obj)/vdso-o32.so.dbg.raw: $(obj)/vdso-o32.lds $(obj-vdso-o32) FORCE - $(call if_changed,vdsold) + $(call if_changed,vdsold_and_vdso_check) $(obj)/vdso-o32-image.c: VDSO_NAME := o32 $(obj)/vdso-o32-image.c: $(obj)/vdso-o32.so.dbg.raw $(obj)/vdso-o32.so.raw \ @@ -175,7 +202,7 @@ $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE $(call if_changed_dep,cpp_lds_S) $(obj)/vdso-n32.so.dbg.raw: $(obj)/vdso-n32.lds $(obj-vdso-n32) FORCE - $(call if_changed,vdsold) + $(call if_changed,vdsold_and_vdso_check) $(obj)/vdso-n32-image.c: VDSO_NAME := n32 $(obj)/vdso-n32-image.c: $(obj)/vdso-n32.so.dbg.raw $(obj)/vdso-n32.so.raw \ diff --git a/arch/mips/vdso/config-n32-o32-env.c b/arch/mips/vdso/config-n32-o32-env.c new file mode 100644 index 0000000000000000000000000000000000000000..68e9aaf6d8c733e3266f9e67f0765a7b4f59955f --- /dev/null +++ b/arch/mips/vdso/config-n32-o32-env.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Configuration file for O32 and N32 binaries. + * Note: To be included before lib/vdso/gettimeofday.c + */ +#if defined(CONFIG_MIPS32_O32) || defined(CONFIG_MIPS32_N32) +/* + * In case of a 32 bit VDSO for a 64 bit kernel fake a 32 bit kernel + * configuration. 
+ */ +#undef CONFIG_64BIT + +#define BUILD_VDSO32 +#define CONFIG_32BIT 1 +#define CONFIG_GENERIC_ATOMIC64 1 + +#endif + diff --git a/arch/mips/vdso/elf.S b/arch/mips/vdso/elf.S index 428a1917afc6b4516ed40e25faec50595f2afbd8..c0c85d1260949fe1809e75e66f7bf2d28f71d674 100644 --- a/arch/mips/vdso/elf.S +++ b/arch/mips/vdso/elf.S @@ -8,7 +8,7 @@ * option) any later version. */ -#include "vdso.h" +#include #include diff --git a/arch/mips/vdso/sigreturn.S b/arch/mips/vdso/sigreturn.S index 30c6219912ac17224ccb0d033739821c34d8005d..c2b05956e4cbb500dfa6c7ceee0ca40411873b94 100644 --- a/arch/mips/vdso/sigreturn.S +++ b/arch/mips/vdso/sigreturn.S @@ -8,7 +8,7 @@ * option) any later version. */ -#include "vdso.h" +#include #include diff --git a/arch/mips/vdso/vdso.lds.S b/arch/mips/vdso/vdso.lds.S index 8df7dd53e8e085ae4955ab0bc2f8ff75fa47825d..659fe0c3750ad4be40aa1eba1a02e96c06713652 100644 --- a/arch/mips/vdso/vdso.lds.S +++ b/arch/mips/vdso/vdso.lds.S @@ -99,6 +99,10 @@ VERSION global: __vdso_clock_gettime; __vdso_gettimeofday; + __vdso_clock_getres; +#if _MIPS_SIM != _MIPS_SIM_ABI64 + __vdso_clock_gettime64; +#endif #endif local: *; }; diff --git a/arch/mips/vdso/vgettimeofday.c b/arch/mips/vdso/vgettimeofday.c new file mode 100644 index 0000000000000000000000000000000000000000..6b83b6376a4b58d340f1c464cbbf402c588caf73 --- /dev/null +++ b/arch/mips/vdso/vgettimeofday.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * MIPS64 and compat userspace implementations of gettimeofday() + * and similar. + * + * Copyright (C) 2015 Imagination Technologies + * Copyright (C) 2018 ARM Limited + * + */ +#include +#include + +#if _MIPS_SIM != _MIPS_SIM_ABI64 +int __vdso_clock_gettime(clockid_t clock, + struct old_timespec32 *ts) +{ + return __cvdso_clock_gettime32(clock, ts); +} + +#ifdef CONFIG_MIPS_CLOCK_VSYSCALL + +/* + * This is behind the ifdef so that we don't provide the symbol when there's no + * possibility of there being a usable clocksource, because there's nothing we + * can do without it. When libc fails the symbol lookup it should fall back on + * the standard syscall path. + */ +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return __cvdso_gettimeofday(tv, tz); +} + +#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */ + +int __vdso_clock_getres(clockid_t clock_id, + struct old_timespec32 *res) +{ + return __cvdso_clock_getres_time32(clock_id, res); +} + +int __vdso_clock_gettime64(clockid_t clock, + struct __kernel_timespec *ts) +{ + return __cvdso_clock_gettime(clock, ts); +} + +#else + +int __vdso_clock_gettime(clockid_t clock, + struct __kernel_timespec *ts) +{ + return __cvdso_clock_gettime(clock, ts); +} + +#ifdef CONFIG_MIPS_CLOCK_VSYSCALL + +/* + * This is behind the ifdef so that we don't provide the symbol when there's no + * possibility of there being a usable clocksource, because there's nothing we + * can do without it. When libc fails the symbol lookup it should fall back on + * the standard syscall path. 
+ */ +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return __cvdso_gettimeofday(tv, tz); +} + +#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */ + +int __vdso_clock_getres(clockid_t clock_id, + struct __kernel_timespec *res) +{ + return __cvdso_clock_getres(clock_id, res); +} + +#endif diff --git a/arch/nds32/include/asm/vdso_datapage.h b/arch/nds32/include/asm/vdso_datapage.h index 79db5a12ca5eb660d2a3ae7873c4872cd2c415f2..34d80548297f63ae908db6cbdb2d98f0f314c770 100644 --- a/arch/nds32/include/asm/vdso_datapage.h +++ b/arch/nds32/include/asm/vdso_datapage.h @@ -20,6 +20,7 @@ struct vdso_data { u32 xtime_clock_sec; /* CLOCK_REALTIME - seconds */ u32 cs_mult; /* clocksource multiplier */ u32 cs_shift; /* Cycle to nanosecond divisor (power of two) */ + u32 hrtimer_res; /* hrtimer resolution */ u64 cs_cycle_last; /* last cycle value */ u64 cs_mask; /* clocksource mask */ diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c index 016f15891f6d40c22a908a6da6c719c5a3c6503a..90bcae6f855448da7775863d9d172ce0fbaae9b4 100644 --- a/arch/nds32/kernel/vdso.c +++ b/arch/nds32/kernel/vdso.c @@ -220,6 +220,7 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->xtime_coarse_sec = tk->xtime_sec; vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; + vdso_data->hrtimer_res = hrtimer_resolution; vdso_write_end(vdso_data); } diff --git a/arch/nds32/kernel/vdso/gettimeofday.c b/arch/nds32/kernel/vdso/gettimeofday.c index 038721af40e37d91213f8de6bd636b8c03cfeae8..b02581891c33c7288d1db33a58dbeff5eb1ad6cf 100644 --- a/arch/nds32/kernel/vdso/gettimeofday.c +++ b/arch/nds32/kernel/vdso/gettimeofday.c @@ -208,6 +208,8 @@ static notrace int clock_getres_fallback(clockid_t _clk_id, notrace int __vdso_clock_getres(clockid_t clk_id, struct timespec *res) { + struct vdso_data *vdata = __get_datapage(); + if (res == NULL) return 0; switch (clk_id) { @@ -215,7 +217,7 @@ notrace int __vdso_clock_getres(clockid_t clk_id, struct timespec *res) case CLOCK_MONOTONIC: case CLOCK_MONOTONIC_RAW: res->tv_sec = 0; - res->tv_nsec = CLOCK_REALTIME_RES; + res->tv_nsec = vdata->hrtimer_res; break; case CLOCK_REALTIME_COARSE: case CLOCK_MONOTONIC_COARSE: diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 9a3798660cefa34a47a1466c03e0aaf21d495749..fc68c0fc08b58ba3c82ab89c25d7bad57e45b954 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -145,6 +145,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, extern int hash__has_transparent_hugepage(void); #endif +static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd) +{ + BUG(); + return pmd; +} + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */ diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index f82ee8a3b561718c4a138b392fe6bfeb2990aea4..790e4a946f6eace5e657e8f9ea0ee0bef89f6367 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -233,7 +233,7 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array, */ static inline int hash__pmd_trans_huge(pmd_t pmd) { - return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) == + return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) == (_PAGE_PTE | H_PAGE_THP_HUGE)); } @@ -259,6 +259,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, pmd_t 
*pmdp); extern int hash__has_transparent_hugepage(void); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)); +} + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */ diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 855dbae6d351d40f7cc013e79793ff915adfa908..2aea6efc2e63ddaddc793c2ff65a64cc1141b94c 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1253,7 +1253,9 @@ extern void serialize_against_pte_lookup(struct mm_struct *mm); static inline pmd_t pmd_mkdevmap(pmd_t pmd) { - return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP)); + if (radix_enabled()) + return radix__pmd_mkdevmap(pmd); + return hash__pmd_mkdevmap(pmd); } static inline int pmd_devmap(pmd_t pmd) diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index 7d1a3d1543fc0fc2d699394ac9d0cbf4cd76f9bb..da01badef0cbc4a5a6c6c885e093aae59bc6de43 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -255,6 +255,11 @@ extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm, extern int radix__has_transparent_hugepage(void); #endif +static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP)); +} + extern int __meminit radix__vmemmap_create_mapping(unsigned long start, unsigned long page_size, unsigned long phys); diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h index 7c1d8e74b25d4f18ca31fdfa1dd773ab47a0e542..9e516fe3daabaa4ae7d513f6c8df7d928e727491 100644 --- a/arch/powerpc/include/asm/drmem.h +++ b/arch/powerpc/include/asm/drmem.h @@ -28,12 +28,12 @@ struct drmem_lmb_info { extern struct drmem_lmb_info *drmem_info; #define for_each_drmem_lmb_in_range(lmb, start, end) \ - for ((lmb) = (start); (lmb) <= (end); (lmb)++) + for ((lmb) = (start); (lmb) < (end); (lmb)++) #define for_each_drmem_lmb(lmb) \ for_each_drmem_lmb_in_range((lmb), \ &drmem_info->lmbs[0], \ - &drmem_info->lmbs[drmem_info->n_lmbs - 1]) + &drmem_info->lmbs[drmem_info->n_lmbs]) /* * The of_drconf_cell_v1 struct defines the layout of the LMB data diff --git a/arch/powerpc/include/asm/setjmp.h b/arch/powerpc/include/asm/setjmp.h index 279d03a1eec625b12d93632ce7586796835bee97..6941fe202bc822b57ef115d2e51d97c8ed83e576 100644 --- a/arch/powerpc/include/asm/setjmp.h +++ b/arch/powerpc/include/asm/setjmp.h @@ -12,7 +12,9 @@ #define JMP_BUF_LEN 23 -extern long setjmp(long *); -extern void longjmp(long *, long); +typedef long jmp_buf[JMP_BUF_LEN]; + +extern int setjmp(jmp_buf env) __attribute__((returns_twice)); +extern void longjmp(jmp_buf env, int val) __attribute__((noreturn)); #endif /* _ASM_POWERPC_SETJMP_H */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index d450280e5c29c1b5e1260d55fd6a98ac1be55fda..1e64cfe22a83e09b763c156720127ca7b030861b 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -5,9 +5,6 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' -# Avoid clang warnings around longjmp/setjmp declarations -CFLAGS_crash.o += -ffreestanding - subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror ifdef CONFIG_PPC64 diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 
36178000a2f259a1d0dc47f7f561aa7affbf3a55..4a860d3b922962295dbec0764a734e61b1f63c03 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -170,8 +170,11 @@ core_idle_lock_held: bne- core_idle_lock_held blr -/* Reuse an unused pt_regs slot for IAMR */ +/* Reuse some unused pt_regs slots for AMR/IAMR/UAMOR/UAMOR */ +#define PNV_POWERSAVE_AMR _TRAP #define PNV_POWERSAVE_IAMR _DAR +#define PNV_POWERSAVE_UAMOR _DSISR +#define PNV_POWERSAVE_AMOR RESULT /* * Pass requested state in r3: @@ -205,8 +208,16 @@ pnv_powersave_common: SAVE_NVGPRS(r1) BEGIN_FTR_SECTION + mfspr r4, SPRN_AMR mfspr r5, SPRN_IAMR + mfspr r6, SPRN_UAMOR + std r4, PNV_POWERSAVE_AMR(r1) std r5, PNV_POWERSAVE_IAMR(r1) + std r6, PNV_POWERSAVE_UAMOR(r1) +BEGIN_FTR_SECTION_NESTED(42) + mfspr r7, SPRN_AMOR + std r7, PNV_POWERSAVE_AMOR(r1) +END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) mfcr r5 @@ -935,12 +946,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) REST_GPR(2, r1) BEGIN_FTR_SECTION - /* IAMR was saved in pnv_powersave_common() */ + /* These regs were saved in pnv_powersave_common() */ + ld r4, PNV_POWERSAVE_AMR(r1) ld r5, PNV_POWERSAVE_IAMR(r1) + ld r6, PNV_POWERSAVE_UAMOR(r1) + mtspr SPRN_AMR, r4 mtspr SPRN_IAMR, r5 + mtspr SPRN_UAMOR, r6 +BEGIN_FTR_SECTION_NESTED(42) + ld r7, PNV_POWERSAVE_AMOR(r1) + mtspr SPRN_AMOR, r7 +END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42) /* - * We don't need an isync here because the upcoming mtmsrd is - * execution synchronizing. + * We don't need an isync here after restoring IAMR because the upcoming + * mtmsrd is execution synchronizing. */ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 5c60bb0f927f819fea2d57aa98d7ad0a7eec5ebf..53a39661eb13b5c1e8721f089cc112323648c07e 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -277,6 +277,9 @@ int kprobe_handler(struct pt_regs *regs) if (user_mode(regs)) return 0; + if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)) + return 0; + /* * We don't want to be preempted for the entire * duration of kprobe processing diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index c101b321dece8480f3474d709e7d10fcc17bc1e6..7765837ba2037914a63214b2d260f43b9f9e0a92 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -82,10 +82,16 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev) const __be32 *addrs; u32 i; int proplen; + bool mark_unset = false; addrs = of_get_property(node, "assigned-addresses", &proplen); - if (!addrs) - return; + if (!addrs || !proplen) { + addrs = of_get_property(node, "reg", &proplen); + if (!addrs || !proplen) + return; + mark_unset = true; + } + pr_debug(" parse addresses (%d bytes) @ %p\n", proplen, addrs); for (; proplen >= 20; proplen -= 20, addrs += 5) { flags = pci_parse_of_flags(of_read_number(addrs, 1), 0); @@ -110,6 +116,8 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev) continue; } res->flags = flags; + if (mark_unset) + res->flags |= IORESOURCE_UNSET; res->name = pci_name(dev); region.start = base; region.end = base + size - 1; diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index eaf7300be5ab5dddcc3c8d316c9b2973d783c170..bd4996958b13d464486ca57760bee43e5ebba72f 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -518,6 +518,8 @@ static bool __init 
parse_cache_info(struct device_node *np, lsizep = of_get_property(np, propnames[3], NULL); if (bsizep == NULL) bsizep = lsizep; + if (lsizep == NULL) + lsizep = bsizep; if (lsizep != NULL) lsize = be32_to_cpu(*lsizep); if (bsizep != NULL) diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index b088b0700d0dd4d62a492c984b1e8366e05e6f0d..7aba592bba3673d71c0a400520d01aca9bceb694 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -477,8 +477,10 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, err |= __get_user(tsk->thread.ckpt_regs.ccr, &sc->gp_regs[PT_CCR]); + /* Don't allow userspace to set the trap value */ + regs->trap = 0; + /* These regs are not checkpointed; they can go in 'regs'. */ - err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]); err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 5449e76cf2dfd5d37bc213fdfc0e192a42ff15ec..f6c21f6af274e6f28ca49dac539a049ff914f09d 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -492,35 +492,6 @@ static inline void clear_irq_work_pending(void) "i" (offsetof(struct paca_struct, irq_work_pending))); } -void arch_irq_work_raise(void) -{ - preempt_disable(); - set_irq_work_pending_flag(); - /* - * Non-nmi code running with interrupts disabled will replay - * irq_happened before it re-enables interrupts, so setthe - * decrementer there instead of causing a hardware exception - * which would immediately hit the masked interrupt handler - * and have the net effect of setting the decrementer in - * irq_happened. - * - * NMI interrupts can not check this when they return, so the - * decrementer hardware exception is raised, which will fire - * when interrupts are next enabled. - * - * BookE does not support this yet, it must audit all NMI - * interrupt handlers to ensure they call nmi_enter() so this - * check would be correct. - */ - if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) { - set_dec(1); - } else { - hard_irq_disable(); - local_paca->irq_happened |= PACA_IRQ_DEC; - } - preempt_enable(); -} - #else /* 32-bit */ DEFINE_PER_CPU(u8, irq_work_pending); @@ -529,16 +500,27 @@ DEFINE_PER_CPU(u8, irq_work_pending); #define test_irq_work_pending() __this_cpu_read(irq_work_pending) #define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0) +#endif /* 32 vs 64 bit */ + void arch_irq_work_raise(void) { + /* + * 64-bit code that uses irq soft-mask can just cause an immediate + * interrupt here that gets soft masked, if this is called under + * local_irq_disable(). It might be possible to prevent that happening + * by noticing interrupts are disabled and setting decrementer pending + * to be replayed when irqs are enabled. The problem there is that + * tracing can call irq_work_raise, including in code that does low + * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on) + * which could get tangled up if we're messing with the same state + * here. 
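For reference, a minimal sketch of an irq_work user of the kind arch_irq_work_raise() services; the callback, its message and the caller are made up, only the irq_work API itself is taken as given.

#include <linux/irq_work.h>
#include <linux/printk.h>

static void defer_print(struct irq_work *work)
{
	pr_info("deferred work ran with interrupts enabled\n");
}

static DEFINE_IRQ_WORK(deferred, defer_print);

/*
 * Called from NMI or other contexts where printk/locks are unsafe;
 * queuing relies on arch_irq_work_raise() to kick the decrementer so
 * the callback runs from a normal interrupt later.
 */
static void from_restricted_context(void)
{
	irq_work_queue(&deferred);
}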
+ */ preempt_disable(); set_irq_work_pending_flag(); set_dec(1); preempt_enable(); } -#endif /* 32 vs 64 bit */ - #else /* CONFIG_IRQ_WORK */ #define test_irq_work_pending() 0 diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S index e066a658acac6f17872bf146fd884ef93e1f1723..56f58a362ea56bd2ed69287e44a692d23b7ca433 100644 --- a/arch/powerpc/mm/tlb_nohash_low.S +++ b/arch/powerpc/mm/tlb_nohash_low.S @@ -402,7 +402,7 @@ _GLOBAL(set_context) * extern void loadcam_entry(unsigned int index) * * Load TLBCAM[index] entry in to the L2 CAM MMU - * Must preserve r7, r8, r9, and r10 + * Must preserve r7, r8, r9, r10 and r11 */ _GLOBAL(loadcam_entry) mflr r5 @@ -438,6 +438,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) */ _GLOBAL(loadcam_multi) mflr r8 + /* Don't switch to AS=1 if already there */ + mfmsr r11 + andi. r11,r11,MSR_IS + bne 10f /* * Set up temporary TLB entry that is the same as what we're @@ -463,6 +467,7 @@ _GLOBAL(loadcam_multi) mtmsr r6 isync +10: mr r9,r3 add r10,r3,r4 2: bl loadcam_entry @@ -471,6 +476,10 @@ _GLOBAL(loadcam_multi) mr r3,r9 blt 2b + /* Don't return to AS=0 if we were in AS=1 at function start */ + andi. r11,r11,MSR_IS + bne 3f + /* Return to AS=0 and clear the temporary entry */ mfmsr r6 rlwinm. r6,r6,0,~(MSR_IS|MSR_DS) @@ -486,6 +495,7 @@ _GLOBAL(loadcam_multi) tlbwe isync +3: mtlr r8 blr #endif diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index b7f937563827d91a3f0e331d1b87f5a7ed536787..d1fee2d35b49c6d24552b52728d37916f8046b2d 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c @@ -299,23 +299,6 @@ static int __init maple_probe(void) return 1; } -define_machine(maple) { - .name = "Maple", - .probe = maple_probe, - .setup_arch = maple_setup_arch, - .init_IRQ = maple_init_IRQ, - .pci_irq_fixup = maple_pci_irq_fixup, - .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, - .restart = maple_restart, - .halt = maple_halt, - .get_boot_time = maple_get_boot_time, - .set_rtc_time = maple_set_rtc_time, - .get_rtc_time = maple_get_rtc_time, - .calibrate_decr = generic_calibrate_decr, - .progress = maple_progress, - .power_save = power4_idle, -}; - #ifdef CONFIG_EDAC /* * Register a platform device for CPC925 memory controller on @@ -372,3 +355,20 @@ static int __init maple_cpc925_edac_setup(void) } machine_device_initcall(maple, maple_cpc925_edac_setup); #endif + +define_machine(maple) { + .name = "Maple", + .probe = maple_probe, + .setup_arch = maple_setup_arch, + .init_IRQ = maple_init_IRQ, + .pci_irq_fixup = maple_pci_irq_fixup, + .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, + .restart = maple_restart, + .halt = maple_halt, + .get_boot_time = maple_get_boot_time, + .set_rtc_time = maple_set_rtc_time, + .get_rtc_time = maple_get_rtc_time, + .calibrate_decr = generic_calibrate_decr, + .progress = maple_progress, + .power_save = power4_idle, +}; diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index fc01a2c0f8ed43253e610b427710f3df84cce45f..b168c3742b431c97e27423fddd0cb646c6f1c9c6 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -227,7 +227,7 @@ static int get_lmb_range(u32 drc_index, int n_lmbs, struct drmem_lmb **end_lmb) { struct drmem_lmb *lmb, *start, *end; - struct drmem_lmb *last_lmb; + struct drmem_lmb *limit; start = NULL; for_each_drmem_lmb(lmb) { @@ -240,10 +240,10 @@ static int get_lmb_range(u32 drc_index, 
int n_lmbs, if (!start) return -EINVAL; - end = &start[n_lmbs - 1]; + end = &start[n_lmbs]; - last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1]; - if (end > last_lmb) + limit = &drmem_info->lmbs[drmem_info->n_lmbs]; + if (end > limit) return -EINVAL; *start_lmb = start; diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 49e3a88b6a0c1474656c3c7020ea5b3d7e8c4691..d660a90616cda4dd5a4ce46be6c7ea61f6f92ef1 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -1056,7 +1056,7 @@ static int __init vpa_debugfs_init(void) { char name[16]; long i; - static struct dentry *vpa_dir; + struct dentry *vpa_dir; if (!firmware_has_feature(FW_FEATURE_SPLPAR)) return 0; diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 3c939b9de488ec21c60ee9d32ef2eb6a16a5ccd8..1c31a08cdd54584250b64b14d759c6285b1e9e2c 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -72,13 +72,6 @@ static u32 xive_ipi_irq; /* Xive state for each CPU */ static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu); -/* - * A "disabled" interrupt should never fire, to catch problems - * we set its logical number to this - */ -#define XIVE_BAD_IRQ 0x7fffffff -#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1) - /* An invalid CPU target */ #define XIVE_INVALID_TARGET (-1) @@ -1074,7 +1067,7 @@ static int xive_setup_cpu_ipi(unsigned int cpu) xc = per_cpu(xive_cpu, cpu); /* Check if we are already setup */ - if (xc->hw_ipi != 0) + if (xc->hw_ipi != XIVE_BAD_IRQ) return 0; /* Grab an IPI from the backend, this will populate xc->hw_ipi */ @@ -1111,7 +1104,7 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) /* Disable the IPI and free the IRQ data */ /* Already cleaned up ? 
*/ - if (xc->hw_ipi == 0) + if (xc->hw_ipi == XIVE_BAD_IRQ) return; /* Mask the IPI */ @@ -1267,6 +1260,7 @@ static int xive_prepare_cpu(unsigned int cpu) if (np) xc->chip_id = of_get_ibm_chip_id(np); of_node_put(np); + xc->hw_ipi = XIVE_BAD_IRQ; per_cpu(xive_cpu, cpu) = xc; } diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 6d5b2802245285a700924258dea315b7c3629a44..cb1f51ad48e40a3b62dc673aa5a35d17622d2f32 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -311,7 +311,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc) s64 rc; /* Free the IPI */ - if (!xc->hw_ipi) + if (xc->hw_ipi == XIVE_BAD_IRQ) return; for (;;) { rc = opal_xive_free_irq(xc->hw_ipi); @@ -319,7 +319,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc) msleep(OPAL_BUSY_DELAY_MS); continue; } - xc->hw_ipi = 0; + xc->hw_ipi = XIVE_BAD_IRQ; break; } } diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index e3ebf64693929f64da6248495a0297e980bcc1c6..5566bbc86f4afda058fc7ce77446baf11c2b07d7 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -509,11 +509,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc) static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc) { - if (!xc->hw_ipi) + if (xc->hw_ipi == XIVE_BAD_IRQ) return; xive_irq_bitmap_free(xc->hw_ipi); - xc->hw_ipi = 0; + xc->hw_ipi = XIVE_BAD_IRQ; } #endif /* CONFIG_SMP */ diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h index f34abed0c05fd3064bf0a2c45fb193b49079283f..48808dbb25dce60df08cf38ae7456619338c26cb 100644 --- a/arch/powerpc/sysdev/xive/xive-internal.h +++ b/arch/powerpc/sysdev/xive/xive-internal.h @@ -9,6 +9,13 @@ #ifndef __XIVE_INTERNAL_H #define __XIVE_INTERNAL_H +/* + * A "disabled" interrupt should never fire, to catch problems + * we set its logical number to this + */ +#define XIVE_BAD_IRQ 0x7fffffff +#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1) + /* Each CPU carry one of these with various per-CPU state */ struct xive_cpu { #ifdef CONFIG_SMP diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index 365e711bebabbb4b8209fffadcaf6ce7fb63d4af..ab193cd7dbf909ba28768dd8be12fce8e95b8f70 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile @@ -1,9 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for xmon -# Avoid clang warnings around longjmp/setjmp declarations -subdir-ccflags-y := -ffreestanding - subdir-ccflags-$(CONFIG_PPC_WERROR) += -Werror GCOV_PROFILE := n diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index 53a5316cc4b7e557b8eb4b71aa78738d2d2ecf11..4c7cf8787a8489af80b746af4a02cd89f949b9d7 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c @@ -79,7 +79,7 @@ static int show_diag_stat(struct seq_file *m, void *v) static void *show_diag_stat_start(struct seq_file *m, loff_t *pos) { - return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; + return *pos <= NR_DIAG_STAT ? 
(void *)((unsigned long) *pos + 1) : NULL; } static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos) @@ -128,7 +128,7 @@ void diag_stat_inc(enum diag_stat_enum nr) } EXPORT_SYMBOL(diag_stat_inc); -void diag_stat_inc_norecursion(enum diag_stat_enum nr) +void notrace diag_stat_inc_norecursion(enum diag_stat_enum nr) { this_cpu_inc(diag_stat.counter[nr]); trace_s390_diagnose_norecursion(diag_map[nr].code); diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 5bfb1ce129f4b81994bc78e5a2188365d8cf7409..74a296cea21cc8062b9642bb9c81759df6ed23e8 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1537,6 +1537,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw) perf_aux_output_end(handle, size); num_sdb = aux->sfb.num_sdb; + num_sdb = aux->sfb.num_sdb; while (!done) { /* Get an output handle */ aux = perf_aux_output_begin(handle, cpuhw->event); diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 6fe2e1875058bec89419d680bef0110249ba9ed3..675d4be0c2b77f18e53865bc57c05d9244fc3137 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -157,8 +157,9 @@ static void show_cpu_mhz(struct seq_file *m, unsigned long n) static int show_cpuinfo(struct seq_file *m, void *v) { unsigned long n = (unsigned long) v - 1; + unsigned long first = cpumask_first(cpu_online_mask); - if (!n) + if (n == first) show_cpu_summary(m, v); if (!machine_has_cpu_mhz) return 0; @@ -171,6 +172,8 @@ static inline void *c_update(loff_t *pos) { if (*pos) *pos = cpumask_next(*pos - 1, cpu_online_mask); + else + *pos = cpumask_first(cpu_online_mask); return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL; } diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index ecd24711f3aa9caa137544b0434f23e29797c144..8e31dfd85de326371218f893761644ae57f1b4e9 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -393,7 +393,7 @@ int smp_find_processor_id(u16 address) return -1; } -bool arch_vcpu_is_preempted(int cpu) +bool notrace arch_vcpu_is_preempted(int cpu) { if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu)) return false; @@ -403,7 +403,7 @@ bool arch_vcpu_is_preempted(int cpu) } EXPORT_SYMBOL(arch_vcpu_is_preempted); -void smp_yield_cpu(int cpu) +void notrace smp_yield_cpu(int cpu) { if (MACHINE_HAS_DIAG9C) { diag_stat_inc_norecursion(DIAG_STAT_X09C); diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c index 490b52e85014538937d0c67e73a8bf6bf11c7cf9..11a669f3cc93c17d5f69f281296e04c2a39cc2bb 100644 --- a/arch/s390/kernel/trace.c +++ b/arch/s390/kernel/trace.c @@ -14,7 +14,7 @@ EXPORT_TRACEPOINT_SYMBOL(s390_diagnose); static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth); -void trace_s390_diagnose_norecursion(int diag_nr) +void notrace trace_s390_diagnose_norecursion(int diag_nr) { unsigned long flags; unsigned int *depth; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 11c3cd906ab44a71a4701717425aace7f3676ef0..18662c1a93611147482081f714b9c9e873acb160 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1666,6 +1666,9 @@ static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn) start = slot + 1; } + if (start >= slots->used_slots) + return slots->used_slots - 1; + if (gfn >= memslots[start].base_gfn && gfn < memslots[start].base_gfn + memslots[start].npages) { atomic_set(&slots->lru_slot, start); diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 
a2b28cd1e3fedb2bdcc6dbb1cff530ba0e3371a7..17d73b71df1d8e460b00b1452dd35705727b7252 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -1024,6 +1024,7 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->iprcc = PGM_ADDRESSING; scb_s->pgmilc = 4; scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4); + rc = 1; } return rc; } diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index c4f8039a35e8dda0bc20999b7db089e3ab09b613..0267405ab7c69fec93f73df3ed35f381b03c7f38 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -64,10 +64,13 @@ mm_segment_t enable_sacf_uaccess(void) { mm_segment_t old_fs; unsigned long asce, cr; + unsigned long flags; old_fs = current->thread.mm_segment; if (old_fs & 1) return old_fs; + /* protect against a concurrent page table upgrade */ + local_irq_save(flags); current->thread.mm_segment |= 1; asce = S390_lowcore.kernel_asce; if (likely(old_fs == USER_DS)) { @@ -83,6 +86,7 @@ mm_segment_t enable_sacf_uaccess(void) __ctl_load(asce, 7, 7); set_cpu_flag(CIF_ASCE_SECONDARY); } + local_irq_restore(flags); return old_fs; } EXPORT_SYMBOL(enable_sacf_uaccess); diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 911c7ded35f15a2706b293ac2533b02b4906c4aa..7cde0f2f52e1439d3653de4490d62ee55954631c 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -787,14 +787,18 @@ static void gmap_call_notifier(struct gmap *gmap, unsigned long start, static inline unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level) { + const int asce_type = gmap->asce & _ASCE_TYPE_MASK; unsigned long *table; if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4)) return NULL; if (gmap_is_shadow(gmap) && gmap->removed) return NULL; - if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11))) + + if (asce_type != _ASCE_TYPE_REGION1 && + gaddr & (-1UL << (31 + (asce_type >> 2) * 11))) return NULL; + table = gmap->table; switch (gmap->asce & _ASCE_TYPE_MASK) { case _ASCE_TYPE_REGION1: @@ -1834,6 +1838,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, goto out_free; } else if (*table & _REGION_ENTRY_ORIGIN) { rc = -EAGAIN; /* Race with shadow */ + goto out_free; } crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY); /* mark as invalid as long as the parent table is not protected */ diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 814f26520aa2c2439de4e10ce52bf0476c8f2661..f3bc9c9305da6231cedd7b0a73afce19c83836ba 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -72,8 +72,20 @@ static void __crst_table_upgrade(void *arg) { struct mm_struct *mm = arg; - if (current->active_mm == mm) - set_user_asce(mm); + /* we must change all active ASCEs to avoid the creation of new TLBs */ + if (current->active_mm == mm) { + S390_lowcore.user_asce = mm->context.asce; + if (current->thread.mm_segment == USER_DS) { + __ctl_load(S390_lowcore.user_asce, 1, 1); + /* Mark user-ASCE present in CR1 */ + clear_cpu_flag(CIF_ASCE_PRIMARY); + } + if (current->thread.mm_segment == USER_DS_SACF) { + __ctl_load(S390_lowcore.user_asce, 7, 7); + /* enable_sacf_uaccess does all or nothing */ + WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY)); + } + } __tlb_flush_local(); } diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 83c470364dfb34dce51320322e9b119c10157f3e..748bd0921dffff852d1013847ae8cdf96b4b1353 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -1574,7 +1574,9 @@ int io_thread(void *arg) written = 0; do { - res = 
os_write_file(kernel_fd, ((char *) io_req_buffer) + written, n); + res = os_write_file(kernel_fd, + ((char *) io_req_buffer) + written, + n - written); if (res >= 0) { written += res; } else { diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2b08204a4b89c7d0d555e6aade2f6f14df5adefc..7e5f12b8edf615d1fc1e1590b82ab194a6089186 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -18,6 +18,7 @@ config X86_32 select HAVE_GENERIC_DMA_COHERENT select MODULES_USE_ELF_REL select OLD_SIGACTION + select GENERIC_VDSO_32 config X86_64 def_bool y @@ -112,6 +113,7 @@ config X86 select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select GENERIC_TIME_VSYSCALL + select GENERIC_GETTIMEOFDAY select HARDLOCKUP_CHECK_TIMESTAMP if X86_64 select HAVE_ACPI_APEI if ACPI select HAVE_ACPI_APEI_NMI if ACPI @@ -190,6 +192,7 @@ config X86 select HAVE_SYSCALL_TRACEPOINTS select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_USER_RETURN_NOTIFIER + select HAVE_GENERIC_VDSO select HOTPLUG_SMT if SMP select IRQ_FORCED_THREADING select NEED_SG_DMA_LENGTH diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index 37380c0d59996b8ed688d0e350111343e5dca0ae..01d628ea34024c1a88192a2a19e12920bceaf196 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -106,7 +106,7 @@ ENTRY(startup_32) notl %eax andl %eax, %ebx cmpl $LOAD_PHYSICAL_ADDR, %ebx - jge 1f + jae 1f #endif movl $LOAD_PHYSICAL_ADDR, %ebx 1: diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 4eaa724afce3446bd2d7b24b43aa730347e05557..9fa644c62839f663396a2d484816b6f8d6a5aa78 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -106,7 +106,7 @@ ENTRY(startup_32) notl %eax andl %eax, %ebx cmpl $LOAD_PHYSICAL_ADDR, %ebx - jge 1f + jae 1f #endif movl $LOAD_PHYSICAL_ADDR, %ebx 1: @@ -297,7 +297,7 @@ ENTRY(startup_64) notq %rax andq %rax, %rbp cmpq $LOAD_PHYSICAL_ADDR, %rbp - jge 1f + jae 1f #endif movq $LOAD_PHYSICAL_ADDR, %rbp 1: diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig index ad00d38cad980dad7583a876faec1086c223ce3f..7cea113ebea3c4cc5f765a0a6637415a22a8fee8 100644 --- a/arch/x86/configs/gki_defconfig +++ b/arch/x86/configs/gki_defconfig @@ -3,6 +3,7 @@ CONFIG_AUDIT=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT=y +CONFIG_IRQ_TIME_ACCOUNTING=y CONFIG_TASKSTATS=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y @@ -14,12 +15,12 @@ CONFIG_CGROUPS=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_CGROUP_SCHED=y +# CONFIG_FAIR_GROUP_SCHED is not set CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_BPF=y CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_SCHED_TUNE=y CONFIG_BLK_DEV_INITRD=y @@ -45,6 +46,7 @@ CONFIG_EFI=y CONFIG_PM_WAKELOCKS=y CONFIG_PM_WAKELOCKS_LIMIT=0 # CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_TIMES=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y @@ -75,7 +77,9 @@ CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_NET_IPIP=y CONFIG_NET_IPGRE_DEMUX=y +CONFIG_NET_IPGRE=y CONFIG_NET_IPVTI=y CONFIG_INET_ESP=y # CONFIG_INET_XFRM_MODE_BEET is not set @@ -88,6 +92,7 @@ CONFIG_INET6_ESP=y CONFIG_INET6_IPCOMP=y CONFIG_IPV6_MIP6=y CONFIG_IPV6_VTI=y +CONFIG_IPV6_GRE=y CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_NETFILTER=y @@ -178,6 +183,8 @@ CONFIG_CFG80211=y CONFIG_MAC80211=y CONFIG_RFKILL=y # 
CONFIG_UEVENT_HELPER is not set +CONFIG_FW_LOADER_USER_HELPER=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y # CONFIG_FW_CACHE is not set # CONFIG_ALLOW_DEV_COREDUMP is not set CONFIG_GNSS=y @@ -191,6 +198,7 @@ CONFIG_SCSI=y # CONFIG_SCSI_MQ_DEFAULT is not set # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y @@ -204,6 +212,7 @@ CONFIG_DM_BOW=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y CONFIG_TUN=y +CONFIG_VETH=y # CONFIG_ETHERNET is not set CONFIG_PHYLIB=y CONFIG_PPP=y @@ -252,11 +261,13 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_SERIAL_8250_EXAR is not set CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_MSM_GENI_EARLY_CONSOLE=y CONFIG_SERIAL_DEV_BUS=y CONFIG_HW_RANDOM=y CONFIG_HPET=y # CONFIG_DEVPORT is not set # CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y # CONFIG_I2C_HELPER_AUTO is not set CONFIG_SPI=y CONFIG_GPIOLIB=y @@ -270,6 +281,8 @@ CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y # CONFIG_VGA_ARB is not set CONFIG_DRM=y # CONFIG_DRM_FBDEV_EMULATION is not set @@ -281,7 +294,6 @@ CONFIG_SND=y CONFIG_SND_HRTIMER=y CONFIG_SND_DYNAMIC_MINORS=y # CONFIG_SND_SUPPORT_OLD_API is not set -# CONFIG_SND_VERBOSE_PROCFS is not set # CONFIG_SND_DRIVERS is not set CONFIG_SND_USB_AUDIO=y CONFIG_SND_SOC=y @@ -308,16 +320,18 @@ CONFIG_USB_CONFIGFS_F_MIDI=y CONFIG_MMC=y # CONFIG_PWRSEQ_EMMC is not set # CONFIG_PWRSEQ_SIMPLE is not set +CONFIG_MMC_CRYPTO=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TRANSIENT=y CONFIG_RTC_CLASS=y -# CONFIG_RTC_SYSTOHC is not set CONFIG_UIO=y CONFIG_STAGING=y CONFIG_ASHMEM=y +CONFIG_REMOTEPROC=y CONFIG_PM_DEVFREQ=y CONFIG_IIO=y CONFIG_ANDROID=y @@ -402,6 +416,8 @@ CONFIG_SECURITY=y CONFIG_SECURITYFS=y CONFIG_SECURITY_NETWORK=y CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +CONFIG_STATIC_USERMODEHELPER=y CONFIG_SECURITY_SELINUX=y CONFIG_INIT_STACK_ALL=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 31fbb4a7d9f6716997611c8d10a12c2b9264f150..993dd06c8923c1f53e8f41a48b510af655c1c31d 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -98,13 +98,6 @@ For 32-bit we have the following conventions - kernel is built with #define SIZEOF_PTREGS 21*8 .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0 - /* - * Push registers and sanitize registers of values that a - * speculation attack might otherwise want to exploit. The - * lower registers are likely clobbered well before they - * could be put to use in a speculative execution gadget. 
- * Interleave XOR with PUSH for better uop scheduling: - */ .if \save_ret pushq %rsi /* pt_regs->si */ movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ @@ -114,34 +107,43 @@ For 32-bit we have the following conventions - kernel is built with pushq %rsi /* pt_regs->si */ .endif pushq \rdx /* pt_regs->dx */ - xorl %edx, %edx /* nospec dx */ pushq %rcx /* pt_regs->cx */ - xorl %ecx, %ecx /* nospec cx */ pushq \rax /* pt_regs->ax */ pushq %r8 /* pt_regs->r8 */ - xorl %r8d, %r8d /* nospec r8 */ pushq %r9 /* pt_regs->r9 */ - xorl %r9d, %r9d /* nospec r9 */ pushq %r10 /* pt_regs->r10 */ - xorl %r10d, %r10d /* nospec r10 */ pushq %r11 /* pt_regs->r11 */ - xorl %r11d, %r11d /* nospec r11*/ pushq %rbx /* pt_regs->rbx */ - xorl %ebx, %ebx /* nospec rbx*/ pushq %rbp /* pt_regs->rbp */ - xorl %ebp, %ebp /* nospec rbp*/ pushq %r12 /* pt_regs->r12 */ - xorl %r12d, %r12d /* nospec r12*/ pushq %r13 /* pt_regs->r13 */ - xorl %r13d, %r13d /* nospec r13*/ pushq %r14 /* pt_regs->r14 */ - xorl %r14d, %r14d /* nospec r14*/ pushq %r15 /* pt_regs->r15 */ - xorl %r15d, %r15d /* nospec r15*/ UNWIND_HINT_REGS + .if \save_ret pushq %rsi /* return address on top of stack */ .endif + + /* + * Sanitize registers of values that a speculation attack might + * otherwise want to exploit. The lower registers are likely clobbered + * well before they could be put to use in a speculative execution + * gadget. + */ + xorl %edx, %edx /* nospec dx */ + xorl %ecx, %ecx /* nospec cx */ + xorl %r8d, %r8d /* nospec r8 */ + xorl %r9d, %r9d /* nospec r9 */ + xorl %r10d, %r10d /* nospec r10 */ + xorl %r11d, %r11d /* nospec r11 */ + xorl %ebx, %ebx /* nospec rbx */ + xorl %ebp, %ebp /* nospec rbp */ + xorl %r12d, %r12d /* nospec r12 */ + xorl %r13d, %r13d /* nospec r13 */ + xorl %r14d, %r14d /* nospec r14 */ + xorl %r15d, %r15d /* nospec r15 */ + .endm .macro POP_REGS pop_rdi=1 skip_r11rcx=0 diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index d07432062ee679c922203a475871a658cbc928c6..37d9016d476837355f204d963875462d4f09ac27 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -1489,6 +1489,7 @@ ENTRY(int3) END(int3) ENTRY(general_protection) + ASM_CLAC pushl $do_general_protection jmp common_exception END(general_protection) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index ccb5e3486aee79b485d05f0e99628461fc9019ec..dfe26f3cfffc21d61150a1107160bf26f32021cd 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -312,7 +312,6 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) */ syscall_return_via_sysret: /* rcx and r11 are already restored (see code above) */ - UNWIND_HINT_EMPTY POP_REGS pop_rdi=0 skip_r11rcx=1 /* @@ -321,6 +320,7 @@ syscall_return_via_sysret: */ movq %rsp, %rdi movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp + UNWIND_HINT_EMPTY pushq RSP-RDI(%rdi) /* RSP */ pushq (%rdi) /* RDI */ @@ -575,7 +575,7 @@ END(spurious_entries_start) * +----------------------------------------------------+ */ ENTRY(interrupt_entry) - UNWIND_HINT_FUNC + UNWIND_HINT_IRET_REGS offset=16 ASM_CLAC cld @@ -607,9 +607,9 @@ ENTRY(interrupt_entry) pushq 5*8(%rdi) /* regs->eflags */ pushq 4*8(%rdi) /* regs->cs */ pushq 3*8(%rdi) /* regs->ip */ + UNWIND_HINT_IRET_REGS pushq 2*8(%rdi) /* regs->orig_ax */ pushq 8(%rdi) /* return address */ - UNWIND_HINT_FUNC movq (%rdi), %rdi jmp 2f @@ -700,6 +700,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) */ movq %rsp, %rdi movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp + UNWIND_HINT_EMPTY /* Copy the IRET frame to the 
trampoline stack. */ pushq 6*8(%rdi) /* SS */ @@ -1744,7 +1745,7 @@ ENTRY(rewind_stack_do_exit) movq PER_CPU_VAR(cpu_current_top_of_stack), %rax leaq -PTREGS_SIZE(%rax), %rsp - UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE + UNWIND_HINT_REGS call do_exit END(rewind_stack_do_exit) diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index c66b333a08ac584eac321b8926ac70f24570e186..cd7f6d9d6e58020769def1d8d87ff40664ce5fd8 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -3,6 +3,12 @@ # Building vDSO images for x86. # +# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before +# the inclusion of generic Makefile. +ARCH_REL_TYPE_ABS := R_X86_64_JUMP_SLOT|R_X86_64_GLOB_DAT|R_X86_64_RELATIVE| +ARCH_REL_TYPE_ABS += R_386_GLOB_DAT|R_386_JMP_SLOT|R_386_RELATIVE +include $(srctree)/lib/vdso/Makefile + KASAN_SANITIZE := n UBSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y @@ -49,7 +55,7 @@ VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \ -z max-page-size=4096 $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE - $(call if_changed,vdso) + $(call if_changed,vdso_and_check) HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/$(SUBARCH)/include/uapi hostprogs-y += vdso2c @@ -75,7 +81,7 @@ ifneq ($(RETPOLINE_VDSO_CFLAGS),) endif endif -$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) +$(vobjs): KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) # # vDSO code runs in userspace and -pg doesn't help with profiling anyway. @@ -119,7 +125,7 @@ $(obj)/%.so: $(obj)/%.so.dbg $(call if_changed,objcopy) $(obj)/vdsox32.so.dbg: $(obj)/vdsox32.lds $(vobjx32s) FORCE - $(call if_changed,vdso) + $(call if_changed,vdso_and_check) CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds) VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1 @@ -140,6 +146,7 @@ KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(LTO_CFLAGS),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(CFI_CFLAGS),$(KBUILD_CFLAGS_32)) +KBUILD_CFLAGS_32 := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector) KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) @@ -160,7 +167,7 @@ $(obj)/vdso32.so.dbg: FORCE \ $(obj)/vdso32/note.o \ $(obj)/vdso32/system_call.o \ $(obj)/vdso32/sigreturn.o - $(call if_changed,vdso) + $(call if_changed,vdso_and_check) # # The DSO images are built using a special linker script. @@ -176,6 +183,9 @@ VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \ -Bsymbolic GCOV_PROFILE := n +quiet_cmd_vdso_and_check = VDSO $@ + cmd_vdso_and_check = $(cmd_vdso); $(cmd_vdso_check) + # # Install the unstripped copies of vdso*.so. If our toolchain supports # build-id, install .build-id links as well. diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c index 8a88e738f87db7d49265814cfc99e0d262677fa5..54157e800611449eb4b120d6cd8921dc9b63769a 100644 --- a/arch/x86/entry/vdso/vclock_gettime.c +++ b/arch/x86/entry/vdso/vclock_gettime.c @@ -1,341 +1,84 @@ /* - * Copyright 2006 Andi Kleen, SUSE Labs. 
- * Subject to the GNU Public License, v.2 - * * Fast user context implementation of clock_gettime, gettimeofday, and time. * + * Copyright 2006 Andi Kleen, SUSE Labs. + * Copyright 2019 ARM Limited + * * 32 Bit compat layer by Stefani Seibold * sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany - * - * The code should have no internal unresolved relocations. - * Check with readelf after changing. */ - -#include -#include -#include -#include -#include -#include -#include -#include #include #include +#include -#define gtod (&VVAR(vsyscall_gtod_data)) +#include "../../../../lib/vdso/gettimeofday.c" -extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts); -extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); +extern int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); extern time_t __vdso_time(time_t *t); -#ifdef CONFIG_PARAVIRT_CLOCK -extern u8 pvclock_page[PAGE_SIZE] - __attribute__((visibility("hidden"))); -#endif - -#ifdef CONFIG_HYPERV_TSCPAGE -extern u8 hvclock_page[PAGE_SIZE] - __attribute__((visibility("hidden"))); -#endif - -#ifndef BUILD_VDSO32 - -notrace static long vdso_fallback_gettime(long clock, struct timespec *ts) +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { - long ret; - asm ("syscall" : "=a" (ret), "=m" (*ts) : - "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : - "memory", "rcx", "r11"); - return ret; + return __cvdso_gettimeofday(tv, tz); } -notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz) -{ - long ret; - - asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) : - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : - "memory", "rcx", "r11"); - return ret; -} - - -#else +int gettimeofday(struct __kernel_old_timeval *, struct timezone *) + __attribute__((weak, alias("__vdso_gettimeofday"))); -notrace static long vdso_fallback_gettime(long clock, struct timespec *ts) +time_t __vdso_time(time_t *t) { - long ret; - - asm ( - "mov %%ebx, %%edx \n" - "mov %[clock], %%ebx \n" - "call __kernel_vsyscall \n" - "mov %%edx, %%ebx \n" - : "=a" (ret), "=m" (*ts) - : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts) - : "memory", "edx"); - return ret; + return __cvdso_time(t); } -notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz) -{ - long ret; +time_t time(time_t *t) __attribute__((weak, alias("__vdso_time"))); - asm ( - "mov %%ebx, %%edx \n" - "mov %[tv], %%ebx \n" - "call __kernel_vsyscall \n" - "mov %%edx, %%ebx \n" - : "=a" (ret), "=m" (*tv), "=m" (*tz) - : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz) - : "memory", "edx"); - return ret; -} -#endif +#if defined(CONFIG_X86_64) && !defined(BUILD_VDSO32_64) +/* both 64-bit and x32 use these */ +extern int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts); +extern int __vdso_clock_getres(clockid_t clock, struct __kernel_timespec *res); -#ifdef CONFIG_PARAVIRT_CLOCK -static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void) +int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) { - return (const struct pvclock_vsyscall_time_info *)&pvclock_page; -} - -static notrace u64 vread_pvclock(int *mode) -{ - const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti; - u64 ret; - u64 last; - u32 version; - - /* - * Note: The kernel and hypervisor must guarantee that cpu ID - * number maps 1:1 to per-CPU pvclock time info. 
- * - * Because the hypervisor is entirely unaware of guest userspace - * preemption, it cannot guarantee that per-CPU pvclock time - * info is updated if the underlying CPU changes or that that - * version is increased whenever underlying CPU changes. - * - * On KVM, we are guaranteed that pvti updates for any vCPU are - * atomic as seen by *all* vCPUs. This is an even stronger - * guarantee than we get with a normal seqlock. - * - * On Xen, we don't appear to have that guarantee, but Xen still - * supplies a valid seqlock using the version field. - * - * We only do pvclock vdso timing at all if - * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to - * mean that all vCPUs have matching pvti and that the TSC is - * synced, so we can just look at vCPU 0's pvti. - */ - - do { - version = pvclock_read_begin(pvti); - - if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) { - *mode = VCLOCK_NONE; - return 0; - } - - ret = __pvclock_read_cycles(pvti, rdtsc_ordered()); - } while (pvclock_read_retry(pvti, version)); - - /* refer to vread_tsc() comment for rationale */ - last = gtod->cycle_last; - - if (likely(ret >= last)) - return ret; - - return last; + return __cvdso_clock_gettime(clock, ts); } -#endif -#ifdef CONFIG_HYPERV_TSCPAGE -static notrace u64 vread_hvclock(int *mode) -{ - const struct ms_hyperv_tsc_page *tsc_pg = - (const struct ms_hyperv_tsc_page *)&hvclock_page; - u64 current_tick = hv_read_tsc_page(tsc_pg); - if (current_tick != U64_MAX) - return current_tick; - - *mode = VCLOCK_NONE; - return 0; -} -#endif +int clock_gettime(clockid_t, struct __kernel_timespec *) + __attribute__((weak, alias("__vdso_clock_gettime"))); -notrace static u64 vread_tsc(void) +int __vdso_clock_getres(clockid_t clock, + struct __kernel_timespec *res) { - u64 ret = (u64)rdtsc_ordered(); - u64 last = gtod->cycle_last; - - if (likely(ret >= last)) - return ret; - - /* - * GCC likes to generate cmov here, but this branch is extremely - * predictable (it's just a function of time and the likely is - * very likely) and there's a data dependence, so force GCC - * to generate a branch instead. I don't barrier() because - * we don't actually need a barrier, and if this function - * ever gets inlined it will generate worse code. - */ - asm volatile (""); - return last; + return __cvdso_clock_getres(clock, res); } +int clock_getres(clockid_t, struct __kernel_timespec *) + __attribute__((weak, alias("__vdso_clock_getres"))); -notrace static inline u64 vgetsns(int *mode) -{ - u64 v; - cycles_t cycles; - - if (gtod->vclock_mode == VCLOCK_TSC) - cycles = vread_tsc(); - - /* - * For any memory-mapped vclock type, we need to make sure that gcc - * doesn't cleverly hoist a load before the mode check. Otherwise we - * might end up touching the memory-mapped page even if the vclock in - * question isn't enabled, which will segfault. Hence the barriers. - */ -#ifdef CONFIG_PARAVIRT_CLOCK - else if (gtod->vclock_mode == VCLOCK_PVCLOCK) { - barrier(); - cycles = vread_pvclock(mode); - } -#endif -#ifdef CONFIG_HYPERV_TSCPAGE - else if (gtod->vclock_mode == VCLOCK_HVCLOCK) { - barrier(); - cycles = vread_hvclock(mode); - } -#endif - else - return 0; - v = (cycles - gtod->cycle_last) & gtod->mask; - return v * gtod->mult; -} +#else +/* i386 only */ +extern int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts); +extern int __vdso_clock_getres(clockid_t clock, struct old_timespec32 *res); -/* Code size doesn't matter (vdso is 4k anyway) and this is faster. 
*/ -notrace static int __always_inline do_realtime(struct timespec *ts) +int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts) { - unsigned long seq; - u64 ns; - int mode; - - do { - seq = gtod_read_begin(gtod); - mode = gtod->vclock_mode; - ts->tv_sec = gtod->wall_time_sec; - ns = gtod->wall_time_snsec; - ns += vgetsns(&mode); - ns >>= gtod->shift; - } while (unlikely(gtod_read_retry(gtod, seq))); - - ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); - ts->tv_nsec = ns; - - return mode; + return __cvdso_clock_gettime32(clock, ts); } -notrace static int __always_inline do_monotonic(struct timespec *ts) -{ - unsigned long seq; - u64 ns; - int mode; - - do { - seq = gtod_read_begin(gtod); - mode = gtod->vclock_mode; - ts->tv_sec = gtod->monotonic_time_sec; - ns = gtod->monotonic_time_snsec; - ns += vgetsns(&mode); - ns >>= gtod->shift; - } while (unlikely(gtod_read_retry(gtod, seq))); - - ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); - ts->tv_nsec = ns; - - return mode; -} +int clock_gettime(clockid_t, struct old_timespec32 *) + __attribute__((weak, alias("__vdso_clock_gettime"))); -notrace static void do_realtime_coarse(struct timespec *ts) +int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts) { - unsigned long seq; - do { - seq = gtod_read_begin(gtod); - ts->tv_sec = gtod->wall_time_coarse_sec; - ts->tv_nsec = gtod->wall_time_coarse_nsec; - } while (unlikely(gtod_read_retry(gtod, seq))); + return __cvdso_clock_gettime(clock, ts); } -notrace static void do_monotonic_coarse(struct timespec *ts) -{ - unsigned long seq; - do { - seq = gtod_read_begin(gtod); - ts->tv_sec = gtod->monotonic_time_coarse_sec; - ts->tv_nsec = gtod->monotonic_time_coarse_nsec; - } while (unlikely(gtod_read_retry(gtod, seq))); -} +int clock_gettime64(clockid_t, struct __kernel_timespec *) + __attribute__((weak, alias("__vdso_clock_gettime64"))); -notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) +int __vdso_clock_getres(clockid_t clock, struct old_timespec32 *res) { - switch (clock) { - case CLOCK_REALTIME: - if (do_realtime(ts) == VCLOCK_NONE) - goto fallback; - break; - case CLOCK_MONOTONIC: - if (do_monotonic(ts) == VCLOCK_NONE) - goto fallback; - break; - case CLOCK_REALTIME_COARSE: - do_realtime_coarse(ts); - break; - case CLOCK_MONOTONIC_COARSE: - do_monotonic_coarse(ts); - break; - default: - goto fallback; - } - - return 0; -fallback: - return vdso_fallback_gettime(clock, ts); + return __cvdso_clock_getres_time32(clock, res); } -int clock_gettime(clockid_t, struct timespec *) - __attribute__((weak, alias("__vdso_clock_gettime"))); - -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) -{ - if (likely(tv != NULL)) { - if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE)) - return vdso_fallback_gtod(tv, tz); - tv->tv_usec /= 1000; - } - if (unlikely(tz != NULL)) { - tz->tz_minuteswest = gtod->tz_minuteswest; - tz->tz_dsttime = gtod->tz_dsttime; - } - return 0; -} -int gettimeofday(struct timeval *, struct timezone *) - __attribute__((weak, alias("__vdso_gettimeofday"))); - -/* - * This will break when the xtime seconds get inaccurate, but that is - * unlikely - */ -notrace time_t __vdso_time(time_t *t) -{ - /* This is atomic on x86 so we don't need any locks. 
*/ - time_t result = READ_ONCE(gtod->wall_time_sec); - - if (t) - *t = result; - return result; -} -time_t time(time_t *t) - __attribute__((weak, alias("__vdso_time"))); +int clock_getres(clockid_t, struct old_timespec32 *) + __attribute__((weak, alias("__vdso_clock_getres"))); +#endif diff --git a/arch/x86/entry/vdso/vdso.lds.S b/arch/x86/entry/vdso/vdso.lds.S index d3a2dce4cfa96d0d64b36563f5af0bd57d13573e..36b644e162726bb2d13194d869e4670b0ffb997c 100644 --- a/arch/x86/entry/vdso/vdso.lds.S +++ b/arch/x86/entry/vdso/vdso.lds.S @@ -25,6 +25,8 @@ VERSION { __vdso_getcpu; time; __vdso_time; + clock_getres; + __vdso_clock_getres; local: *; }; } diff --git a/arch/x86/entry/vdso/vdso32/vdso32.lds.S b/arch/x86/entry/vdso/vdso32/vdso32.lds.S index 422764a81d324223e25757f90c081fd2b5d272ab..c7720995ab1af9362ae9cfbcc99d8c05ff595895 100644 --- a/arch/x86/entry/vdso/vdso32/vdso32.lds.S +++ b/arch/x86/entry/vdso/vdso32/vdso32.lds.S @@ -26,6 +26,8 @@ VERSION __vdso_clock_gettime; __vdso_gettimeofday; __vdso_time; + __vdso_clock_getres; + __vdso_clock_gettime64; }; LINUX_2.5 { diff --git a/arch/x86/entry/vdso/vdsox32.lds.S b/arch/x86/entry/vdso/vdsox32.lds.S index 05cd1c5c4a1575c5eb64d0757aebc7229dc5c1ea..16a8050a4fb65bab6ee26154420492a95f5e01f4 100644 --- a/arch/x86/entry/vdso/vdsox32.lds.S +++ b/arch/x86/entry/vdso/vdsox32.lds.S @@ -21,6 +21,7 @@ VERSION { __vdso_gettimeofday; __vdso_getcpu; __vdso_time; + __vdso_clock_getres; local: *; }; } diff --git a/arch/x86/entry/vdso/vgetcpu.c b/arch/x86/entry/vdso/vgetcpu.c index 8ec3d1f4ce9a4d378d6042650177d4b32a9808e8..edd214f5264dd653741984a28a094d72364cfd39 100644 --- a/arch/x86/entry/vdso/vgetcpu.c +++ b/arch/x86/entry/vdso/vgetcpu.c @@ -13,14 +13,7 @@ notrace long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused) { - unsigned int p; - - p = __getcpu(); - - if (cpu) - *cpu = p & VGETCPU_CPU_MASK; - if (node) - *node = p >> 12; + vdso_read_cpunode(cpu, node); return 0; } diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 5b8b556dbb12aa91664aac85fad9e0d5af0259cd..3f9d43f26f630b29ef9cb53f39d9f2079df84a06 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -332,40 +332,6 @@ static __init int vdso_setup(char *s) return 0; } __setup("vdso=", vdso_setup); -#endif - -#ifdef CONFIG_X86_64 -static void vgetcpu_cpu_init(void *arg) -{ - int cpu = smp_processor_id(); - struct desc_struct d = { }; - unsigned long node = 0; -#ifdef CONFIG_NUMA - node = cpu_to_node(cpu); -#endif - if (static_cpu_has(X86_FEATURE_RDTSCP)) - write_rdtscp_aux((node << 12) | cpu); - - /* - * Store cpu number in limit so that it can be loaded - * quickly in user space in vgetcpu. 
(12 bits for the CPU - * and 8 bits for the node) - */ - d.limit0 = cpu | ((node & 0xf) << 12); - d.limit1 = node >> 4; - d.type = 5; /* RO data, expand down, accessed */ - d.dpl = 3; /* Visible to user code */ - d.s = 1; /* Not a system segment */ - d.p = 1; /* Present */ - d.d = 1; /* 32-bit */ - - write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S); -} - -static int vgetcpu_online(unsigned int cpu) -{ - return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1); -} static int __init init_vdso(void) { @@ -375,9 +341,7 @@ static int __init init_vdso(void) init_vdso_image(&vdso_image_x32); #endif - /* notifier priority > KVM */ - return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE, - "x86/vdso/vma:online", vgetcpu_online, NULL); + return 0; } subsys_initcall(init_vdso); #endif /* CONFIG_X86_64 */ diff --git a/arch/x86/entry/vsyscall/Makefile b/arch/x86/entry/vsyscall/Makefile index a9f4856f622ae4a1453c33f15083e35bbbf4f121..83126271de241ed773c503b3bedb95a57ff1a8fc 100644 --- a/arch/x86/entry/vsyscall/Makefile +++ b/arch/x86/entry/vsyscall/Makefile @@ -1,7 +1,5 @@ # # Makefile for the x86 low level vsyscall code # -obj-y := vsyscall_gtod.o - obj-$(CONFIG_X86_VSYSCALL_EMULATION) += vsyscall_64.o vsyscall_emu_64.o diff --git a/arch/x86/entry/vsyscall/vsyscall_gtod.c b/arch/x86/entry/vsyscall/vsyscall_gtod.c deleted file mode 100644 index e1216dd95c04aa5cbaa6888a1d9f01d6246b21ce..0000000000000000000000000000000000000000 --- a/arch/x86/entry/vsyscall/vsyscall_gtod.c +++ /dev/null @@ -1,78 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2001 Andrea Arcangeli SuSE - * Copyright 2003 Andi Kleen, SuSE Labs. - * - * Modified for x86 32 bit architecture by - * Stefani Seibold - * sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany - * - * Thanks to hpa@transmeta.com for some useful hint. - * Special thanks to Ingo Molnar for his early experience with - * a different vsyscall implementation for Linux/IA32 and for the name. - * - */ - -#include -#include -#include - -int vclocks_used __read_mostly; - -DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data); - -void update_vsyscall_tz(void) -{ - vsyscall_gtod_data.tz_minuteswest = sys_tz.tz_minuteswest; - vsyscall_gtod_data.tz_dsttime = sys_tz.tz_dsttime; -} - -void update_vsyscall(struct timekeeper *tk) -{ - int vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode; - struct vsyscall_gtod_data *vdata = &vsyscall_gtod_data; - - /* Mark the new vclock used. 
*/ - BUILD_BUG_ON(VCLOCK_MAX >= 32); - WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << vclock_mode)); - - gtod_write_begin(vdata); - - /* copy vsyscall data */ - vdata->vclock_mode = vclock_mode; - vdata->cycle_last = tk->tkr_mono.cycle_last; - vdata->mask = tk->tkr_mono.mask; - vdata->mult = tk->tkr_mono.mult; - vdata->shift = tk->tkr_mono.shift; - - vdata->wall_time_sec = tk->xtime_sec; - vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec; - - vdata->monotonic_time_sec = tk->xtime_sec - + tk->wall_to_monotonic.tv_sec; - vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec - + ((u64)tk->wall_to_monotonic.tv_nsec - << tk->tkr_mono.shift); - while (vdata->monotonic_time_snsec >= - (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { - vdata->monotonic_time_snsec -= - ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift; - vdata->monotonic_time_sec++; - } - - vdata->wall_time_coarse_sec = tk->xtime_sec; - vdata->wall_time_coarse_nsec = (long)(tk->tkr_mono.xtime_nsec >> - tk->tkr_mono.shift); - - vdata->monotonic_time_coarse_sec = - vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec; - vdata->monotonic_time_coarse_nsec = - vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec; - - while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) { - vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC; - vdata->monotonic_time_coarse_sec++; - } - - gtod_write_end(vdata); -} diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 8a9cff1f129dc57efb0be3d4a8b5719f8eaa436d..1663ad84778baf8bc367868259214849b014b0f9 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #ifdef CONFIG_HYPERV_TSCPAGE @@ -427,11 +428,14 @@ void hyperv_cleanup(void) } EXPORT_SYMBOL_GPL(hyperv_cleanup); -void hyperv_report_panic(struct pt_regs *regs, long err) +void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die) { static bool panic_reported; u64 guest_id; + if (in_die && !panic_on_oops) + return; + /* * We prefer to report panic on 'die' chain as we have proper * registers to report, but if we miss it (e.g. on BUG()) we need diff --git a/arch/x86/include/asm/clocksource.h b/arch/x86/include/asm/clocksource.h index dc4cfc888d6df3e95eec3de3a26f8ad60b40b18b..3f73dff6c195f77df00bc52c989b017afd200c16 100644 --- a/arch/x86/include/asm/clocksource.h +++ b/arch/x86/include/asm/clocksource.h @@ -4,11 +4,7 @@ #ifndef _ASM_X86_CLOCKSOURCE_H #define _ASM_X86_CLOCKSOURCE_H -#define VCLOCK_NONE 0 /* No vDSO clock available. */ -#define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */ -#define VCLOCK_PVCLOCK 2 /* vDSO should use vread_pvclock. */ -#define VCLOCK_HVCLOCK 3 /* vDSO should use vread_hvclock. 
*/ -#define VCLOCK_MAX 3 +#include struct arch_clocksource_data { int vclock_mode; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 067288d4ef6e29d6f5e28cdeeaf925c8d2c1334e..33136395db8fc7b16792df36a32e281cb3aec860 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -622,10 +622,10 @@ struct kvm_vcpu_arch { bool pvclock_set_guest_stopped_request; struct { + u8 preempted; u64 msr_val; u64 last_steal; - struct gfn_to_hva_cache stime; - struct kvm_steal_time steal; + struct gfn_to_pfn_cache cache; } st; u64 tsc_offset; @@ -1070,7 +1070,7 @@ struct kvm_x86_ops { bool (*xsaves_supported)(void); bool (*umip_emulated)(void); - int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); + int (*check_nested_events)(struct kvm_vcpu *vcpu); void (*request_immediate_exit)(struct kvm_vcpu *vcpu); void (*sched_in)(struct kvm_vcpu *kvm, int cpu); diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 209492849566cefc56305cd8e976fbf4a25d7527..5c524d4f71cd7b2a99b7037f7962e334f227a94c 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h @@ -41,7 +41,7 @@ struct microcode_amd { unsigned int mpb[0]; }; -#define PATCH_MAX_SIZE PAGE_SIZE +#define PATCH_MAX_SIZE (3 * PAGE_SIZE) #ifdef CONFIG_MICROCODE_AMD extern void __init load_ucode_amd_bsp(unsigned int family); diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index f37704497d8f352ff14230e29cd4527e67fb80ea..5b58a6cf487ff4a550e9413a8db00758dd029c2d 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -338,7 +338,7 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, void __init hyperv_init(void); void hyperv_setup_mmu_ops(void); -void hyperv_report_panic(struct pt_regs *regs, long err); +void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die); void hyperv_report_panic_msg(phys_addr_t pa, size_t size); bool hv_is_hyperv_initialized(void); void hyperv_cleanup(void); diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 690c0307afed0932974e5965ccf7bed98daa10f9..2e1ed12c65f82cd01d225b3b9224bdcd6ef9e84a 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -608,12 +608,15 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) return __pmd(val); } -/* mprotect needs to preserve PAT bits when updating vm_page_prot */ +/* + * mprotect needs to preserve PAT and encryption bits when updating + * vm_page_prot + */ #define pgprot_modify pgprot_modify static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) { pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK; - pgprotval_t addbits = pgprot_val(newprot); + pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK; return __pgprot(preservebits | addbits); } diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 106b7d0e2dae5b4ca34fd41fbe996f851b5c5168..71ea49e7db74722df2bab7811de8c47871343573 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -124,7 +124,7 @@ */ #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \ - _PAGE_SOFT_DIRTY | _PAGE_DEVMAP) + _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC) #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) /* diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 
efb44bd3a7140be1df3e24398028c650ccfc0739..cdaec435966bb2917b659783f3dafb4e18a81710 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -24,6 +24,7 @@ struct vm86; #include #include #include +#include #include #include @@ -659,17 +660,6 @@ static inline unsigned int cpuid_edx(unsigned int op) return edx; } -/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ -static __always_inline void rep_nop(void) -{ - asm volatile("rep; nop" ::: "memory"); -} - -static __always_inline void cpu_relax(void) -{ - rep_nop(); -} - /* * This function forces the icache and prefetched instruction stream to * catch up with reality in two very specific cases: diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index b6033680d458e1f5db37fe355e7f50ae0bd1f5db..19b695ff2c68e3b51b7006a46cfbd90b750ac6f0 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h @@ -2,7 +2,7 @@ #ifndef _ASM_X86_PVCLOCK_H #define _ASM_X86_PVCLOCK_H -#include +#include #include /* some helper functions for xen and kvm pv clock sources */ diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index e293c122d0d54fbc802c1212edc9ed099be582f0..a314087add072347f4bc4379e4f72eafdb8938cf 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -186,8 +186,7 @@ #define GDT_ENTRY_TLS_MIN 12 #define GDT_ENTRY_TLS_MAX 14 -/* Abused to load per CPU data from limit */ -#define GDT_ENTRY_PER_CPU 15 +#define GDT_ENTRY_CPUNODE 15 /* * Number of entries in the GDT table: @@ -207,7 +206,7 @@ #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8 + 3) #define __USER32_DS __USER_DS #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8 + 3) -#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU*8 + 3) +#define __CPUNODE_SEG (GDT_ENTRY_CPUNODE*8 + 3) #endif @@ -225,6 +224,47 @@ #define GDT_ENTRY_TLS_ENTRIES 3 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8) +#ifdef CONFIG_X86_64 + +/* Bit size and mask of CPU number stored in the per CPU data (and TSC_AUX) */ +#define VDSO_CPUNODE_BITS 12 +#define VDSO_CPUNODE_MASK 0xfff + +#ifndef __ASSEMBLY__ + +/* Helper functions to store/load CPU and node numbers */ + +static inline unsigned long vdso_encode_cpunode(int cpu, unsigned long node) +{ + return (node << VDSO_CPUNODE_BITS) | cpu; +} + +static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node) +{ + unsigned int p; + + /* + * Load CPU and node number from the GDT. LSL is faster than RDTSCP + * and works on all CPUs. This is volatile so that it orders + * correctly with respect to barrier() and to keep GCC from cleverly + * hoisting it out of the calling function. + * + * If RDPID is available, use it. 
+ */ + alternative_io ("lsl %[seg],%[p]", + ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ + X86_FEATURE_RDPID, + [p] "=a" (p), [seg] "r" (__CPUNODE_SEG)); + + if (cpu) + *cpu = (p & VDSO_CPUNODE_MASK); + if (node) + *node = (p >> VDSO_CPUNODE_BITS); +} + +#endif /* !__ASSEMBLY__ */ +#endif /* CONFIG_X86_64 */ + #ifdef __KERNEL__ /* diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 499578f7e6d7bb5020fe0c503c70b1be0f5b95f1..70fc159ebe6959fead369c46d8a143812a1bc058 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -19,7 +19,7 @@ struct unwind_state { #if defined(CONFIG_UNWINDER_ORC) bool signal, full_regs; unsigned long sp, bp, ip; - struct pt_regs *regs; + struct pt_regs *regs, *prev_regs; #elif defined(CONFIG_UNWINDER_FRAME_POINTER) bool got_irq; unsigned long *bp, *orig_sp, ip; diff --git a/arch/x86/include/asm/vdso/clocksource.h b/arch/x86/include/asm/vdso/clocksource.h new file mode 100644 index 0000000000000000000000000000000000000000..17aa17953f38abdd6235d4e002e02601e01de998 --- /dev/null +++ b/arch/x86/include/asm/vdso/clocksource.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSO_CLOCKSOURCE_H +#define __ASM_VDSO_CLOCKSOURCE_H + +#define VCLOCK_NONE 0 /* No vDSO clock available. */ +#define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */ +#define VCLOCK_PVCLOCK 2 /* vDSO should use vread_pvclock. */ +#define VCLOCK_HVCLOCK 3 /* vDSO should use vread_hvclock. */ +#define VCLOCK_MAX 3 + +#endif /* __ASM_VDSO_CLOCKSOURCE_H */ diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h new file mode 100644 index 0000000000000000000000000000000000000000..d341f33ec74e05e6d484f399994b758c10cc96be --- /dev/null +++ b/arch/x86/include/asm/vdso/gettimeofday.h @@ -0,0 +1,298 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Fast user context implementation of clock_gettime, gettimeofday, and time. + * + * Copyright (C) 2019 ARM Limited. + * Copyright 2006 Andi Kleen, SUSE Labs. + * 32 Bit compat layer by Stefani Seibold + * sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany + */ +#ifndef __ASM_VDSO_GETTIMEOFDAY_H +#define __ASM_VDSO_GETTIMEOFDAY_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define __vdso_data (VVAR(_vdso_data)) + +#define VDSO_HAS_TIME 1 + +#define VDSO_HAS_CLOCK_GETRES 1 + +/* + * Declare the memory-mapped vclock data pages. These come from hypervisors. + * If we ever reintroduce something like direct access to an MMIO clock like + * the HPET again, it will go here as well. + * + * A load from any of these pages will segfault if the clock in question is + * disabled, so appropriate compiler barriers and checks need to be used + * to prevent stray loads. + * + * These declarations MUST NOT be const. The compiler will assume that + * an extern const variable has genuinely constant contents, and the + * resulting code won't work, since the whole point is that these pages + * change over time, possibly while we're accessing them. + */ + +#ifdef CONFIG_PARAVIRT_CLOCK +/* + * This is the vCPU 0 pvclock page. We only use pvclock from the vDSO + * if the hypervisor tells us that all vCPUs can get valid data from the + * vCPU 0 page. 
+ */ +extern struct pvclock_vsyscall_time_info pvclock_page + __attribute__((visibility("hidden"))); +#endif + +#ifdef CONFIG_HYPERV_TSCPAGE +extern struct ms_hyperv_tsc_page hvclock_page + __attribute__((visibility("hidden"))); +#endif + +#ifndef BUILD_VDSO32 + +static __always_inline +long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + long ret; + + asm ("syscall" : "=a" (ret), "=m" (*_ts) : + "0" (__NR_clock_gettime), "D" (_clkid), "S" (_ts) : + "rcx", "r11"); + + return ret; +} + +static __always_inline +long gettimeofday_fallback(struct __kernel_old_timeval *_tv, + struct timezone *_tz) +{ + long ret; + + asm("syscall" : "=a" (ret) : + "0" (__NR_gettimeofday), "D" (_tv), "S" (_tz) : "memory"); + + return ret; +} + +static __always_inline +long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + long ret; + + asm ("syscall" : "=a" (ret), "=m" (*_ts) : + "0" (__NR_clock_getres), "D" (_clkid), "S" (_ts) : + "rcx", "r11"); + + return ret; +} + +#else + +#define VDSO_HAS_32BIT_FALLBACK 1 + +static __always_inline +long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + long ret; + + asm ( + "mov %%ebx, %%edx \n" + "mov %[clock], %%ebx \n" + "call __kernel_vsyscall \n" + "mov %%edx, %%ebx \n" + : "=a" (ret), "=m" (*_ts) + : "0" (__NR_clock_gettime), [clock] "g" (_clkid), "c" (_ts) + : "edx"); + + return ret; +} + +static __always_inline +long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) +{ + long ret; + + asm ( + "mov %%ebx, %%edx \n" + "mov %[clock], %%ebx \n" + "call __kernel_vsyscall \n" + "mov %%edx, %%ebx \n" + : "=a" (ret), "=m" (*_ts) + : "0" (__NR_clock_gettime), [clock] "g" (_clkid), "c" (_ts) + : "edx"); + + return ret; +} + +static __always_inline +long gettimeofday_fallback(struct __kernel_old_timeval *_tv, + struct timezone *_tz) +{ + long ret; + + asm( + "mov %%ebx, %%edx \n" + "mov %2, %%ebx \n" + "call __kernel_vsyscall \n" + "mov %%edx, %%ebx \n" + : "=a" (ret) + : "0" (__NR_gettimeofday), "g" (_tv), "c" (_tz) + : "memory", "edx"); + + return ret; +} + +static __always_inline long +clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + long ret; + + asm ( + "mov %%ebx, %%edx \n" + "mov %[clock], %%ebx \n" + "call __kernel_vsyscall \n" + "mov %%edx, %%ebx \n" + : "=a" (ret), "=m" (*_ts) + : "0" (__NR_clock_getres), [clock] "g" (_clkid), "c" (_ts) + : "edx"); + + return ret; +} + +static __always_inline +long clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) +{ + long ret; + + asm ( + "mov %%ebx, %%edx \n" + "mov %[clock], %%ebx \n" + "call __kernel_vsyscall \n" + "mov %%edx, %%ebx \n" + : "=a" (ret), "=m" (*_ts) + : "0" (__NR_clock_getres), [clock] "g" (_clkid), "c" (_ts) + : "edx"); + + return ret; +} + +#endif + +#ifdef CONFIG_PARAVIRT_CLOCK +static u64 vread_pvclock(void) +{ + const struct pvclock_vcpu_time_info *pvti = &pvclock_page.pvti; + u32 version; + u64 ret; + + /* + * Note: The kernel and hypervisor must guarantee that cpu ID + * number maps 1:1 to per-CPU pvclock time info. + * + * Because the hypervisor is entirely unaware of guest userspace + * preemption, it cannot guarantee that per-CPU pvclock time + * info is updated if the underlying CPU changes or that that + * version is increased whenever underlying CPU changes. + * + * On KVM, we are guaranteed that pvti updates for any vCPU are + * atomic as seen by *all* vCPUs. This is an even stronger + * guarantee than we get with a normal seqlock. 
+ * + * On Xen, we don't appear to have that guarantee, but Xen still + * supplies a valid seqlock using the version field. + * + * We only do pvclock vdso timing at all if + * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to + * mean that all vCPUs have matching pvti and that the TSC is + * synced, so we can just look at vCPU 0's pvti. + */ + + do { + version = pvclock_read_begin(pvti); + + if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) + return U64_MAX; + + ret = __pvclock_read_cycles(pvti, rdtsc_ordered()); + } while (pvclock_read_retry(pvti, version)); + + return ret; +} +#endif + +#ifdef CONFIG_HYPERV_TSCPAGE +static u64 vread_hvclock(void) +{ + return hv_read_tsc_page(&hvclock_page); +} +#endif + +static inline u64 __arch_get_hw_counter(s32 clock_mode) +{ + if (clock_mode == VCLOCK_TSC) + return (u64)rdtsc_ordered(); + /* + * For any memory-mapped vclock type, we need to make sure that gcc + * doesn't cleverly hoist a load before the mode check. Otherwise we + * might end up touching the memory-mapped page even if the vclock in + * question isn't enabled, which will segfault. Hence the barriers. + */ +#ifdef CONFIG_PARAVIRT_CLOCK + if (clock_mode == VCLOCK_PVCLOCK) { + barrier(); + return vread_pvclock(); + } +#endif +#ifdef CONFIG_HYPERV_TSCPAGE + if (clock_mode == VCLOCK_HVCLOCK) { + barrier(); + return vread_hvclock(); + } +#endif + return U64_MAX; +} + +static __always_inline const struct vdso_data *__arch_get_vdso_data(void) +{ + return __vdso_data; +} + +/* + * x86 specific delta calculation. + * + * The regular implementation assumes that clocksource reads are globally + * monotonic. The TSC can be slightly off across sockets which can cause + * the regular delta calculation (@cycles - @last) to return a huge time + * jump. + * + * Therefore it needs to be verified that @cycles are greater than + * @last. If not then use @last, which is the base time of the current + * conversion period. + * + * This variant also removes the masking of the subtraction because the + * clocksource mask of all VDSO capable clocksources on x86 is U64_MAX + * which would result in a pointless operation. The compiler cannot + * optimize it away as the mask comes from the vdso data and is not compile + * time constant. + */ +static __always_inline +u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult) +{ + if (cycles > last) + return (cycles - last) * mult; + return 0; +} +#define vdso_calc_delta vdso_calc_delta + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/x86/include/asm/vdso/processor.h b/arch/x86/include/asm/vdso/processor.h new file mode 100644 index 0000000000000000000000000000000000000000..57b1a7034c640add013166f1b455b8b68c9ef4e0 --- /dev/null +++ b/arch/x86/include/asm/vdso/processor.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 ARM Ltd. + */ +#ifndef __ASM_VDSO_PROCESSOR_H +#define __ASM_VDSO_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. 
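[Editorial note] The x86-specific vdso_calc_delta() above clamps rather than wraps: if the freshly read cycle count appears to be behind the conversion base (possible when TSCs differ slightly across sockets), it reports a zero delta instead of a huge forward jump. A hedged stand-alone sketch of that idea (the generic vDSO code applies the mask and shift separately):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the clamp in vdso_calc_delta(). */
static uint64_t calc_delta(uint64_t cycles, uint64_t last, uint32_t mult)
{
        if (cycles > last)
                return (cycles - last) * mult;
        return 0;       /* never step backwards across sockets */
}

int main(void)
{
        printf("%llu\n", (unsigned long long)calc_delta(1000, 900, 3)); /* 300 */
        printf("%llu\n", (unsigned long long)calc_delta(900, 1000, 3)); /* 0   */
        return 0;
}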
*/ +static __always_inline void rep_nop(void) +{ + asm volatile("rep; nop" ::: "memory"); +} + +static __always_inline void cpu_relax(void) +{ + rep_nop(); +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_VDSO_PROCESSOR_H */ diff --git a/arch/x86/include/asm/vdso/vsyscall.h b/arch/x86/include/asm/vdso/vsyscall.h new file mode 100644 index 0000000000000000000000000000000000000000..0026ab2123ce96756114a897439a3966f4755416 --- /dev/null +++ b/arch/x86/include/asm/vdso/vsyscall.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSO_VSYSCALL_H +#define __ASM_VDSO_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include +#include + +int vclocks_used __read_mostly; + +DEFINE_VVAR(struct vdso_data, _vdso_data); +/* + * Update the vDSO data page to keep in sync with kernel timekeeping. + */ +static __always_inline +struct vdso_data *__x86_get_k_vdso_data(void) +{ + return _vdso_data; +} +#define __arch_get_k_vdso_data __x86_get_k_vdso_data + +static __always_inline +int __x86_get_clock_mode(struct timekeeper *tk) +{ + int vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode; + + /* Mark the new vclock used. */ + BUILD_BUG_ON(VCLOCK_MAX >= 32); + WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << vclock_mode)); + + return vclock_mode; +} +#define __arch_get_clock_mode __x86_get_clock_mode + +/* The asm-generic header needs to be included after the definitions above */ +#include + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_VSYSCALL_H */ diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index 53748541c487ab00d8bd1ee9b4d05305aa33171d..44ec25be6bb5d5fbbcf92bd13f975afa4f276ba8 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h @@ -3,40 +3,15 @@ #define _ASM_X86_VGTOD_H #include -#include +#include +#include +#include #ifdef BUILD_VDSO32_64 typedef u64 gtod_long_t; #else typedef unsigned long gtod_long_t; #endif -/* - * vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time - * so be carefull by modifying this structure. - */ -struct vsyscall_gtod_data { - unsigned seq; - - int vclock_mode; - u64 cycle_last; - u64 mask; - u32 mult; - u32 shift; - - /* open coded 'struct timespec' */ - u64 wall_time_snsec; - gtod_long_t wall_time_sec; - gtod_long_t monotonic_time_sec; - u64 monotonic_time_snsec; - gtod_long_t wall_time_coarse_sec; - gtod_long_t wall_time_coarse_nsec; - gtod_long_t monotonic_time_coarse_sec; - gtod_long_t monotonic_time_coarse_nsec; - - int tz_minuteswest; - int tz_dsttime; -}; -extern struct vsyscall_gtod_data vsyscall_gtod_data; extern int vclocks_used; static inline bool vclock_was_used(int vclock) @@ -44,63 +19,4 @@ static inline bool vclock_was_used(int vclock) return READ_ONCE(vclocks_used) & (1 << vclock); } -static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s) -{ - unsigned ret; - -repeat: - ret = READ_ONCE(s->seq); - if (unlikely(ret & 1)) { - cpu_relax(); - goto repeat; - } - smp_rmb(); - return ret; -} - -static inline int gtod_read_retry(const struct vsyscall_gtod_data *s, - unsigned start) -{ - smp_rmb(); - return unlikely(s->seq != start); -} - -static inline void gtod_write_begin(struct vsyscall_gtod_data *s) -{ - ++s->seq; - smp_wmb(); -} - -static inline void gtod_write_end(struct vsyscall_gtod_data *s) -{ - smp_wmb(); - ++s->seq; -} - -#ifdef CONFIG_X86_64 - -#define VGETCPU_CPU_MASK 0xfff - -static inline unsigned int __getcpu(void) -{ - unsigned int p; - - /* - * Load per CPU data from GDT. 
LSL is faster than RDTSCP and - * works on all CPUs. This is volatile so that it orders - * correctly wrt barrier() and to keep gcc from cleverly - * hoisting it out of the calling function. - * - * If RDPID is available, use it. - */ - alternative_io ("lsl %[seg],%[p]", - ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ - X86_FEATURE_RDPID, - [p] "=a" (p), [seg] "r" (__PER_CPU_SEG)); - - return p; -} - -#endif /* CONFIG_X86_64 */ - #endif /* _ASM_X86_VGTOD_H */ diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h index 3f32dfc2ab7350b6130bcfca317932f431d6cb99..2dbde48f27a99316195a3b5bbdbc29d3b942fa8a 100644 --- a/arch/x86/include/asm/vvar.h +++ b/arch/x86/include/asm/vvar.h @@ -32,19 +32,20 @@ extern char __vvar_page; #define DECLARE_VVAR(offset, type, name) \ - extern type vvar_ ## name __attribute__((visibility("hidden"))); + extern type vvar_ ## name[CS_BASES] \ + __attribute__((visibility("hidden"))); #define VVAR(name) (vvar_ ## name) #define DEFINE_VVAR(type, name) \ - type name \ + type name[CS_BASES] \ __attribute__((section(".vvar_" #name), aligned(16))) __visible #endif /* DECLARE_VVAR(offset, type, name) */ -DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data) +DECLARE_VVAR(128, struct vdso_data, _vdso_data) #undef DECLARE_VVAR diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 3b20607d581b5340fcf5e0345b7e70f9e83d95ad..7303bb398862c08ece19fb2cf06929280592eddd 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1752,7 +1752,7 @@ int __acpi_acquire_global_lock(unsigned int *lock) new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1)); val = cmpxchg(lock, old, new); } while (unlikely (val != old)); - return (new < 3) ? -1 : 0; + return ((new & 0x3) < 3) ? -1 : 0; } int __acpi_release_global_lock(unsigned int *lock) diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index 158ad1483c4352b2f93c7cbb931dfdb8dd40c3bf..92539a1c3e317d618e091db73b86a618cc01c0f9 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -133,7 +133,8 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, /* Make sure we are running on right CPU */ - retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx); + retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx, + false); if (retval == 0) { /* Use the hint in CST */ percpu_entry->states[cx->index].eax = cx->address; diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 7f43eba8d0c105b5736738285b7d23c97cf992a3..35c1a124be1df9f65e39faf08d8a543aebed1d35 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1729,6 +1729,29 @@ static void wait_for_master_cpu(int cpu) #endif } +#ifdef CONFIG_X86_64 +static void setup_getcpu(int cpu) +{ + unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu)); + struct desc_struct d = { }; + + if (static_cpu_has(X86_FEATURE_RDTSCP)) + write_rdtscp_aux(cpudata); + + /* Store CPU and node number in limit. */ + d.limit0 = cpudata; + d.limit1 = cpudata >> 16; + + d.type = 5; /* RO data, expand down, accessed */ + d.dpl = 3; /* Visible to user code */ + d.s = 1; /* Not a system segment */ + d.p = 1; /* Present */ + d.d = 1; /* 32-bit */ + + write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S); +} +#endif + /* * cpu_init() initializes state that is per-CPU. 
Some data is already * initialized (naturally) in the bootstrap process, such as the GDT @@ -1766,6 +1789,7 @@ void cpu_init(void) early_cpu_to_node(cpu) != NUMA_NO_NODE) set_numa_node(early_cpu_to_node(cpu)); #endif + setup_getcpu(cpu); me = current; diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c index 1c92cd08c3b4e55e241428edf16677f09306af66..b32fa6bcf811f69aaac86d6038d170f0c97e464b 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/intel_rdt.c @@ -555,6 +555,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r) d->id = id; cpumask_set_cpu(cpu, &d->cpu_mask); + rdt_domain_reconfigure_cdp(r); + if (r->alloc_capable && domain_setup_ctrlval(r, d)) { kfree(d); return; diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h index 3736f6dc95450f6f51204946d351b47e27feacf8..2b483b739cf1bb204b443cbf3bfa5ac25cf2a139 100644 --- a/arch/x86/kernel/cpu/intel_rdt.h +++ b/arch/x86/kernel/cpu/intel_rdt.h @@ -567,5 +567,6 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms); void cqm_handle_limbo(struct work_struct *work); bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d); void __check_limbo(struct rdt_domain *d, bool force_free); +void rdt_domain_reconfigure_cdp(struct rdt_resource *r); #endif /* _ASM_X86_INTEL_RDT_H */ diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index 11c5accfa4db5a5f47b229cfbd094516a9578783..cea7e01a346d3a789a7c97acfe037682e787f3dd 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -1777,6 +1777,19 @@ static int set_cache_qos_cfg(int level, bool enable) return 0; } +/* Restore the qos cfg state when a domain comes online */ +void rdt_domain_reconfigure_cdp(struct rdt_resource *r) +{ + if (!r->alloc_capable) + return; + + if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA]) + l2_qos_cfg_update(&r->alloc_enabled); + + if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA]) + l3_qos_cfg_update(&r->alloc_enabled); +} + /* * Enable or disable the MBA software controller * which helps user specify bandwidth in MBps. @@ -2910,7 +2923,8 @@ static int rdtgroup_rmdir(struct kernfs_node *kn) * If the rdtgroup is a mon group and parent directory * is a valid "mon_groups" directory, remove the mon group. 
*/ - if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) { + if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && + rdtgrp != &rdtgroup_default) { if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { ret = rdtgroup_ctrl_remove(kn, rdtgrp); diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 852e74e48890b26c3957993f7d2dc9e591341b83..f8b0fa2dbe37428670644b71a32a4433e00cfcbf 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -214,8 +214,8 @@ static void __init ms_hyperv_init_platform(void) ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES); ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); - pr_info("Hyper-V: features 0x%x, hints 0x%x\n", - ms_hyperv.features, ms_hyperv.hints); + pr_info("Hyper-V: features 0x%x, hints 0x%x, misc 0x%x\n", + ms_hyperv.features, ms_hyperv.hints, ms_hyperv.misc_features); ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS); ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS); @@ -250,6 +250,16 @@ static void __init ms_hyperv_init_platform(void) cpuid_eax(HYPERV_CPUID_NESTED_FEATURES); } + /* + * Hyper-V expects to get crash register data or kmsg when + * crash enlightment is available and system crashes. Set + * crash_kexec_post_notifiers to be true to make sure that + * calling crash enlightment interface before running kdump + * kernel. + */ + if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) + crash_kexec_post_notifiers = true; + #ifdef CONFIG_X86_LOCAL_APIC if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS && ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) { diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 637982efecd80cbee7964f3473367cbfbd96191d..899d0fbbd33926ee7c24491223f41d173df5830e 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -15,6 +15,7 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include #include #include #include diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 89be1be1790c413f2030eaee83c379c58bafde5c..169b96492b7cd033b0e88b85dfca7c11b319525b 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -131,9 +131,6 @@ static struct orc_entry *orc_find(unsigned long ip) { static struct orc_entry *orc; - if (!orc_init) - return NULL; - if (ip == 0) return &null_orc_entry; @@ -367,9 +364,38 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr return true; } +/* + * If state->regs is non-NULL, and points to a full pt_regs, just get the reg + * value from state->regs. + * + * Otherwise, if state->regs just points to IRET regs, and the previous frame + * had full regs, it's safe to get the value from the previous regs. This can + * happen when early/late IRQ entry code gets interrupted by an NMI. 
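[Editorial note] The comment above describes get_reg(), which reads a saved register out of a register frame by byte offset, falling back to the previous frame's full registers when only IRET registers are available. A toy sketch of the offset-to-index trick it relies on (the struct here is hypothetical; the real code indexes struct pt_regs):

#include <stddef.h>
#include <stdio.h>

struct toy_regs {
        unsigned long r15, r14, r13, r12, bp, bx;
};

static unsigned long get_reg(const struct toy_regs *regs, size_t reg_off)
{
        /* Treat the frame as an array of longs, index by offset / sizeof(long). */
        return ((const unsigned long *)regs)[reg_off / sizeof(unsigned long)];
}

int main(void)
{
        struct toy_regs r = { .bp = 0xdead, .bx = 0xbeef };

        printf("%lx\n", get_reg(&r, offsetof(struct toy_regs, bp))); /* dead */
        return 0;
}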
+ */ +static bool get_reg(struct unwind_state *state, unsigned int reg_off, + unsigned long *val) +{ + unsigned int reg = reg_off/8; + + if (!state->regs) + return false; + + if (state->full_regs) { + *val = ((unsigned long *)state->regs)[reg]; + return true; + } + + if (state->prev_regs) { + *val = ((unsigned long *)state->prev_regs)[reg]; + return true; + } + + return false; +} + bool unwind_next_frame(struct unwind_state *state) { - unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp; + unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp; enum stack_type prev_type = state->stack_info.type; struct orc_entry *orc; bool indirect = false; @@ -423,39 +449,35 @@ bool unwind_next_frame(struct unwind_state *state) break; case ORC_REG_R10: - if (!state->regs || !state->full_regs) { + if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) { orc_warn("missing regs for base reg R10 at ip %pB\n", (void *)state->ip); goto err; } - sp = state->regs->r10; break; case ORC_REG_R13: - if (!state->regs || !state->full_regs) { + if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) { orc_warn("missing regs for base reg R13 at ip %pB\n", (void *)state->ip); goto err; } - sp = state->regs->r13; break; case ORC_REG_DI: - if (!state->regs || !state->full_regs) { + if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) { orc_warn("missing regs for base reg DI at ip %pB\n", (void *)state->ip); goto err; } - sp = state->regs->di; break; case ORC_REG_DX: - if (!state->regs || !state->full_regs) { + if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) { orc_warn("missing regs for base reg DX at ip %pB\n", (void *)state->ip); goto err; } - sp = state->regs->dx; break; default: @@ -482,6 +504,7 @@ bool unwind_next_frame(struct unwind_state *state) state->sp = sp; state->regs = NULL; + state->prev_regs = NULL; state->signal = false; break; @@ -493,6 +516,7 @@ bool unwind_next_frame(struct unwind_state *state) } state->regs = (struct pt_regs *)sp; + state->prev_regs = NULL; state->full_regs = true; state->signal = true; break; @@ -504,6 +528,8 @@ bool unwind_next_frame(struct unwind_state *state) goto err; } + if (state->full_regs) + state->prev_regs = state->regs; state->regs = (void *)sp - IRET_FRAME_OFFSET; state->full_regs = false; state->signal = true; @@ -512,14 +538,14 @@ bool unwind_next_frame(struct unwind_state *state) default: orc_warn("unknown .orc_unwind entry type %d for ip %pB\n", orc->type, (void *)orig_ip); - break; + goto err; } /* Find BP: */ switch (orc->bp_reg) { case ORC_REG_UNDEFINED: - if (state->regs && state->full_regs) - state->bp = state->regs->bp; + if (get_reg(state, offsetof(struct pt_regs, bp), &tmp)) + state->bp = tmp; break; case ORC_REG_PREV_SP: @@ -563,6 +589,9 @@ EXPORT_SYMBOL_GPL(unwind_next_frame); void __unwind_start(struct unwind_state *state, struct task_struct *task, struct pt_regs *regs, unsigned long *first_frame) { + if (!orc_init) + goto done; + memset(state, 0, sizeof(*state)); state->task = task; @@ -629,7 +658,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, /* Otherwise, skip ahead to the user-specified starting frame: */ while (!unwind_done(state) && (!on_stack(&state->stack_info, first_frame, sizeof(long)) || - state->sp <= (unsigned long)first_frame)) + state->sp < (unsigned long)first_frame)) unwind_next_frame(state); return; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 48c24d0e9e75c746ae6c46e520b9aac9b3c8f8de..1fe9ccabc082a99ed60c051f59c1ad3eb05faad2 100644 --- 
a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -509,7 +509,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, entry->edx |= F(SPEC_CTRL); if (boot_cpu_has(X86_FEATURE_STIBP)) entry->edx |= F(INTEL_STIBP); - if (boot_cpu_has(X86_FEATURE_SSBD)) + if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + boot_cpu_has(X86_FEATURE_AMD_SSBD)) entry->edx |= F(SPEC_CTRL_SSBD); /* * We emulate ARCH_CAPABILITIES in software even diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index cc8f3b41a1b2e68de6308963e404ab5dcf1567b2..df2274414640e21d4333c5f3b89909a6b17c99aa 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1917,6 +1917,10 @@ static void __unregister_enc_region_locked(struct kvm *kvm, static struct kvm *svm_vm_alloc(void) { struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm)); + + if (!kvm_svm) + return NULL; + return &kvm_svm->kvm; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a81d7d9ce9d695a0c1847a75cfcc19469baebcfe..f08c287b62426339323cd18274e9fea258ed377b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2156,43 +2156,15 @@ static void vmcs_load(struct vmcs *vmcs) } #ifdef CONFIG_KEXEC_CORE -/* - * This bitmap is used to indicate whether the vmclear - * operation is enabled on all cpus. All disabled by - * default. - */ -static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE; - -static inline void crash_enable_local_vmclear(int cpu) -{ - cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap); -} - -static inline void crash_disable_local_vmclear(int cpu) -{ - cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap); -} - -static inline int crash_local_vmclear_enabled(int cpu) -{ - return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap); -} - static void crash_vmclear_local_loaded_vmcss(void) { int cpu = raw_smp_processor_id(); struct loaded_vmcs *v; - if (!crash_local_vmclear_enabled(cpu)) - return; - list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), loaded_vmcss_on_cpu_link) vmcs_clear(v->vmcs); } -#else -static inline void crash_enable_local_vmclear(int cpu) { } -static inline void crash_disable_local_vmclear(int cpu) { } #endif /* CONFIG_KEXEC_CORE */ static void __loaded_vmcs_clear(void *arg) @@ -2204,19 +2176,24 @@ static void __loaded_vmcs_clear(void *arg) return; /* vcpu migration can race with cpu offline */ if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) per_cpu(current_vmcs, cpu) = NULL; - crash_disable_local_vmclear(cpu); + + vmcs_clear(loaded_vmcs->vmcs); + if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) + vmcs_clear(loaded_vmcs->shadow_vmcs); + list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); /* - * we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link - * is before setting loaded_vmcs->vcpu to -1 which is done in - * loaded_vmcs_init. Otherwise, other cpu can see vcpu = -1 fist - * then adds the vmcs into percpu list before it is deleted. + * Ensure all writes to loaded_vmcs, including deleting it from its + * current percpu list, complete before setting loaded_vmcs->vcpu to + * -1, otherwise a different cpu can see vcpu == -1 first and add + * loaded_vmcs to its percpu list before it's deleted from this cpu's + * list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs(). 
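[Editorial note] The comment above describes a classic publish/consume pairing: __loaded_vmcs_clear() must finish unlinking the VMCS from its per-CPU list before publishing cpu = -1, and the load side must observe cpu before touching the list. A hedged sketch of the same ordering in portable C11 atomics (illustrative only; the kernel uses smp_wmb()/smp_rmb(), not these primitives):

#include <stdatomic.h>
#include <stdbool.h>

struct vmcs_like {
        _Atomic int cpu;
        bool on_list;   /* stands in for the per-CPU list linkage */
};

/* Clearing side: unlink first, then publish that the VMCS is unowned. */
static void clear_side(struct vmcs_like *v)
{
        v->on_list = false;
        atomic_store_explicit(&v->cpu, -1, memory_order_release);
}

/* Loading side: only trust the list state after seeing cpu == -1. */
static bool safe_to_relink(struct vmcs_like *v)
{
        if (atomic_load_explicit(&v->cpu, memory_order_acquire) != -1)
                return false;
        return !v->on_list;
}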
*/ smp_wmb(); - loaded_vmcs_init(loaded_vmcs); - crash_enable_local_vmclear(cpu); + loaded_vmcs->cpu = -1; + loaded_vmcs->launched = 0; } static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) @@ -3067,18 +3044,17 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (!already_loaded) { loaded_vmcs_clear(vmx->loaded_vmcs); local_irq_disable(); - crash_disable_local_vmclear(cpu); /* - * Read loaded_vmcs->cpu should be before fetching - * loaded_vmcs->loaded_vmcss_on_cpu_link. - * See the comments in __loaded_vmcs_clear(). + * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to + * this cpu's percpu list, otherwise it may not yet be deleted + * from its previous cpu's percpu list. Pairs with the + * smb_wmb() in __loaded_vmcs_clear(). */ smp_rmb(); list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, &per_cpu(loaded_vmcss_on_cpu, cpu)); - crash_enable_local_vmclear(cpu); local_irq_enable(); } @@ -4422,21 +4398,6 @@ static int hardware_enable(void) !hv_get_vp_assist_page(cpu)) return -EFAULT; - INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); - INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); - spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); - - /* - * Now we can enable the vmclear operation in kdump - * since the loaded_vmcss_on_cpu list on this cpu - * has been initialized. - * - * Though the cpu is not in VMX operation now, there - * is no problem to enable the vmclear operation - * for the loaded_vmcss_on_cpu list is empty! - */ - crash_enable_local_vmclear(cpu); - rdmsrl(MSR_IA32_FEATURE_CONTROL, old); test_bits = FEATURE_CONTROL_LOCKED; @@ -6954,8 +6915,13 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) { - return (!to_vmx(vcpu)->nested.nested_run_pending && - vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && + if (to_vmx(vcpu)->nested.nested_run_pending) + return false; + + if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) + return true; + + return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); } @@ -7049,7 +7015,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, */ static void kvm_machine_check(void) { -#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) +#if defined(CONFIG_X86_MCE) struct pt_regs regs = { .cs = 3, /* Fake ring 3 no matter what the guest ran on */ .flags = X86_EFLAGS_IF, @@ -10805,14 +10771,14 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) else if (static_branch_unlikely(&mds_user_clear)) mds_clear_cpu_buffers(); - asm( + asm volatile ( /* Store host registers */ "push %%" _ASM_DX "; push %%" _ASM_BP ";" "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */ "push %%" _ASM_CX " \n\t" - "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t" + "cmp %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t" "je 1f \n\t" - "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t" + "mov %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t" /* Avoid VMWRITE when Enlightened VMCS is in use */ "test %%" _ASM_SI ", %%" _ASM_SI " \n\t" "jz 2f \n\t" @@ -10822,32 +10788,33 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" "1: \n\t" /* Reload cr2 if changed */ - "mov %c[cr2](%0), %%" _ASM_AX " \n\t" + "mov %c[cr2](%%" _ASM_CX "), %%" _ASM_AX " \n\t" "mov %%cr2, %%" _ASM_DX " \n\t" "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t" "je 3f \n\t" "mov %%" _ASM_AX", %%cr2 \n\t" "3: \n\t" /* Check if vmlaunch of vmresume is needed */ - "cmpb $0, %c[launched](%0) 
\n\t" + "cmpb $0, %c[launched](%%" _ASM_CX ") \n\t" /* Load guest registers. Don't clobber flags. */ - "mov %c[rax](%0), %%" _ASM_AX " \n\t" - "mov %c[rbx](%0), %%" _ASM_BX " \n\t" - "mov %c[rdx](%0), %%" _ASM_DX " \n\t" - "mov %c[rsi](%0), %%" _ASM_SI " \n\t" - "mov %c[rdi](%0), %%" _ASM_DI " \n\t" - "mov %c[rbp](%0), %%" _ASM_BP " \n\t" + "mov %c[rax](%%" _ASM_CX "), %%" _ASM_AX " \n\t" + "mov %c[rbx](%%" _ASM_CX "), %%" _ASM_BX " \n\t" + "mov %c[rdx](%%" _ASM_CX "), %%" _ASM_DX " \n\t" + "mov %c[rsi](%%" _ASM_CX "), %%" _ASM_SI " \n\t" + "mov %c[rdi](%%" _ASM_CX "), %%" _ASM_DI " \n\t" + "mov %c[rbp](%%" _ASM_CX "), %%" _ASM_BP " \n\t" #ifdef CONFIG_X86_64 - "mov %c[r8](%0), %%r8 \n\t" - "mov %c[r9](%0), %%r9 \n\t" - "mov %c[r10](%0), %%r10 \n\t" - "mov %c[r11](%0), %%r11 \n\t" - "mov %c[r12](%0), %%r12 \n\t" - "mov %c[r13](%0), %%r13 \n\t" - "mov %c[r14](%0), %%r14 \n\t" - "mov %c[r15](%0), %%r15 \n\t" + "mov %c[r8](%%" _ASM_CX "), %%r8 \n\t" + "mov %c[r9](%%" _ASM_CX "), %%r9 \n\t" + "mov %c[r10](%%" _ASM_CX "), %%r10 \n\t" + "mov %c[r11](%%" _ASM_CX "), %%r11 \n\t" + "mov %c[r12](%%" _ASM_CX "), %%r12 \n\t" + "mov %c[r13](%%" _ASM_CX "), %%r13 \n\t" + "mov %c[r14](%%" _ASM_CX "), %%r14 \n\t" + "mov %c[r15](%%" _ASM_CX "), %%r15 \n\t" #endif - "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */ + /* Load guest RCX. This kills the vmx_vcpu pointer! */ + "mov %c[rcx](%%" _ASM_CX "), %%" _ASM_CX " \n\t" /* Enter guest mode */ "jne 1f \n\t" @@ -10855,26 +10822,42 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "jmp 2f \n\t" "1: " __ex(ASM_VMX_VMRESUME) "\n\t" "2: " - /* Save guest registers, load host registers, keep flags */ - "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" - "pop %0 \n\t" - "setbe %c[fail](%0)\n\t" - "mov %%" _ASM_AX ", %c[rax](%0) \n\t" - "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" - __ASM_SIZE(pop) " %c[rcx](%0) \n\t" - "mov %%" _ASM_DX ", %c[rdx](%0) \n\t" - "mov %%" _ASM_SI ", %c[rsi](%0) \n\t" - "mov %%" _ASM_DI ", %c[rdi](%0) \n\t" - "mov %%" _ASM_BP ", %c[rbp](%0) \n\t" + + /* Save guest's RCX to the stack placeholder (see above) */ + "mov %%" _ASM_CX ", %c[wordsize](%%" _ASM_SP ") \n\t" + + /* Load host's RCX, i.e. 
the vmx_vcpu pointer */ + "pop %%" _ASM_CX " \n\t" + + /* Set vmx->fail based on EFLAGS.{CF,ZF} */ + "setbe %c[fail](%%" _ASM_CX ")\n\t" + + /* Save all guest registers, including RCX from the stack */ + "mov %%" _ASM_AX ", %c[rax](%%" _ASM_CX ") \n\t" + "mov %%" _ASM_BX ", %c[rbx](%%" _ASM_CX ") \n\t" + __ASM_SIZE(pop) " %c[rcx](%%" _ASM_CX ") \n\t" + "mov %%" _ASM_DX ", %c[rdx](%%" _ASM_CX ") \n\t" + "mov %%" _ASM_SI ", %c[rsi](%%" _ASM_CX ") \n\t" + "mov %%" _ASM_DI ", %c[rdi](%%" _ASM_CX ") \n\t" + "mov %%" _ASM_BP ", %c[rbp](%%" _ASM_CX ") \n\t" #ifdef CONFIG_X86_64 - "mov %%r8, %c[r8](%0) \n\t" - "mov %%r9, %c[r9](%0) \n\t" - "mov %%r10, %c[r10](%0) \n\t" - "mov %%r11, %c[r11](%0) \n\t" - "mov %%r12, %c[r12](%0) \n\t" - "mov %%r13, %c[r13](%0) \n\t" - "mov %%r14, %c[r14](%0) \n\t" - "mov %%r15, %c[r15](%0) \n\t" + "mov %%r8, %c[r8](%%" _ASM_CX ") \n\t" + "mov %%r9, %c[r9](%%" _ASM_CX ") \n\t" + "mov %%r10, %c[r10](%%" _ASM_CX ") \n\t" + "mov %%r11, %c[r11](%%" _ASM_CX ") \n\t" + "mov %%r12, %c[r12](%%" _ASM_CX ") \n\t" + "mov %%r13, %c[r13](%%" _ASM_CX ") \n\t" + "mov %%r14, %c[r14](%%" _ASM_CX ") \n\t" + "mov %%r15, %c[r15](%%" _ASM_CX ") \n\t" + + /* + * Clear all general purpose registers (except RSP, which is loaded by + * the CPU during VM-Exit) to prevent speculative use of the guest's + * values, even those that are saved/loaded via the stack. In theory, + * an L1 cache miss when restoring registers could lead to speculative + * execution with the guest's values. Zeroing XORs are dirt cheap, + * i.e. the extra paranoia is essentially free. + */ "xor %%r8d, %%r8d \n\t" "xor %%r9d, %%r9d \n\t" "xor %%r10d, %%r10d \n\t" @@ -10885,18 +10868,22 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "xor %%r15d, %%r15d \n\t" #endif "mov %%cr2, %%" _ASM_AX " \n\t" - "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" + "mov %%" _ASM_AX ", %c[cr2](%%" _ASM_CX ") \n\t" "xor %%eax, %%eax \n\t" "xor %%ebx, %%ebx \n\t" + "xor %%ecx, %%ecx \n\t" + "xor %%edx, %%edx \n\t" "xor %%esi, %%esi \n\t" "xor %%edi, %%edi \n\t" + "xor %%ebp, %%ebp \n\t" "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" ".pushsection .rodata \n\t" ".global vmx_return \n\t" "vmx_return: " _ASM_PTR " 2b \n\t" ".popsection" - : : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp), + : "=c"((int){0}), "=d"((int){0}), "=S"((int){0}) + : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp), [launched]"i"(offsetof(struct vcpu_vmx, __launched)), [fail]"i"(offsetof(struct vcpu_vmx, fail)), [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), @@ -11016,6 +11003,10 @@ STACK_FRAME_NON_STANDARD(vmx_vcpu_run); static struct kvm *vmx_vm_alloc(void) { struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx)); + + if (!kvm_vmx) + return NULL; + return &kvm_vmx->kvm; } @@ -12155,13 +12146,9 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) set_cr4_guest_host_mask(vmx); - if (kvm_mpx_supported()) { - if (vmx->nested.nested_run_pending && - (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) - vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); - else - vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); - } + if (kvm_mpx_supported() && vmx->nested.nested_run_pending && + (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) + vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); if (enable_vpid) { if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) @@ -12225,6 +12212,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, kvm_set_dr(vcpu, 7, vcpu->arch.dr7); 
vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); } + if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || + !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) + vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); if (vmx->nested.nested_run_pending) { vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, vmcs12->vm_entry_intr_info_field); @@ -12990,7 +12980,7 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, } } -static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) +static int vmx_check_nested_events(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qual; @@ -13028,8 +13018,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) return 0; } - if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && - nested_exit_on_intr(vcpu)) { + if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(vcpu)) { if (block_nested_events) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); @@ -13607,17 +13596,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; if (likely(!vmx->fail)) { - /* - * TODO: SDM says that with acknowledge interrupt on - * exit, bit 31 of the VM-exit interrupt information - * (valid interrupt) is always set to 1 on - * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't - * need kvm_cpu_has_interrupt(). See the commit - * message for details. - */ - if (nested_exit_intr_ack_set(vcpu) && - exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && - kvm_cpu_has_interrupt(vcpu)) { + if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && + nested_exit_intr_ack_set(vcpu)) { int irq = kvm_cpu_get_interrupt(vcpu); WARN_ON(irq < 0); vmcs12->vm_exit_intr_info = irq | @@ -14590,7 +14570,7 @@ module_exit(vmx_exit); static int __init vmx_init(void) { - int r; + int r, cpu; #if IS_ENABLED(CONFIG_HYPERV) /* @@ -14641,6 +14621,12 @@ static int __init vmx_init(void) } } + for_each_possible_cpu(cpu) { + INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); + INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); + spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); + } + #ifdef CONFIG_KEXEC_CORE rcu_assign_pointer(crash_vmclear_loaded_vmcss, crash_vmclear_local_loaded_vmcss); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2cb379e261c0fc6af3908a8181b7d72cc6bdfb2d..6bfc9eaf8dee0153344a8fe737f00638edef31ad 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2397,43 +2397,45 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa) static void record_steal_time(struct kvm_vcpu *vcpu) { + struct kvm_host_map map; + struct kvm_steal_time *st; + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; - if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, - &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) + /* -EAGAIN is returned in atomic context so we can just return. */ + if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, + &map, &vcpu->arch.st.cache, false)) return; + st = map.hva + + offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); + /* * Doing a TLB flush here, on the guest's behalf, can avoid * expensive IPIs. 
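[Editorial note] record_steal_time() above now maps the guest page and locates the steal-time record by hand: the MSR value carries the guest address, and its page-offset bits give the position of the structure inside the mapped page. A small sketch of that address split (a 4 KiB page size is assumed here):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT        12
#define PAGE_SIZE         (1u << PAGE_SHIFT)
#define offset_in_page(p) ((uintptr_t)(p) & (PAGE_SIZE - 1))

int main(void)
{
        uintptr_t gpa = 0x12345abc;     /* hypothetical guest address from the MSR */

        printf("gfn=%#lx offset=%#lx\n",
               (unsigned long)(gpa >> PAGE_SHIFT),
               (unsigned long)offset_in_page(gpa));
        return 0;
}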
*/ - if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB) + if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB) kvm_vcpu_flush_tlb(vcpu, false); - if (vcpu->arch.st.steal.version & 1) - vcpu->arch.st.steal.version += 1; /* first time write, random junk */ + vcpu->arch.st.preempted = 0; - vcpu->arch.st.steal.version += 1; + if (st->version & 1) + st->version += 1; /* first time write, random junk */ - kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, - &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); + st->version += 1; smp_wmb(); - vcpu->arch.st.steal.steal += current->sched_info.run_delay - + st->steal += current->sched_info.run_delay - vcpu->arch.st.last_steal; vcpu->arch.st.last_steal = current->sched_info.run_delay; - kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, - &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); - smp_wmb(); - vcpu->arch.st.steal.version += 1; + st->version += 1; - kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, - &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); + kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false); } int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) @@ -2575,11 +2577,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (data & KVM_STEAL_RESERVED_MASK) return 1; - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, - data & KVM_STEAL_VALID_BITS, - sizeof(struct kvm_steal_time))) - return 1; - vcpu->arch.st.msr_val = data; if (!(data & KVM_MSR_ENABLED)) @@ -3272,18 +3269,25 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) { + struct kvm_host_map map; + struct kvm_steal_time *st; + if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; - if (vcpu->arch.st.steal.preempted) + if (vcpu->arch.st.preempted) + return; + + if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map, + &vcpu->arch.st.cache, true)) return; - vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED; + st = map.hva + + offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); - kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime, - &vcpu->arch.st.steal.preempted, - offsetof(struct kvm_steal_time, preempted), - sizeof(vcpu->arch.st.steal.preempted)); + st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; + + kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -7124,7 +7128,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu) kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); } -static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) +static int inject_pending_event(struct kvm_vcpu *vcpu) { int r; @@ -7160,7 +7164,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) * from L2 to L1. */ if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { - r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); + r = kvm_x86_ops->check_nested_events(vcpu); if (r != 0) return r; } @@ -7210,7 +7214,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) * KVM_REQ_EVENT only on certain events and not unconditionally? 
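[Editorial note] The version handling above follows a seqcount-like protocol: the host bumps version to an odd value before touching the record and to an even value afterwards, so a guest reader can detect and retry a torn read. A hedged guest-side sketch (barriers elided; volatile is used only for illustration):

#include <stdint.h>

struct steal_time_like {
        volatile uint32_t version;
        volatile uint64_t steal;
};

static uint64_t read_steal(const struct steal_time_like *st)
{
        uint32_t v;
        uint64_t s;

        do {
                while ((v = st->version) & 1)
                        ;               /* odd: update in progress */
                s = st->steal;
        } while (st->version != v);     /* changed underneath us: retry */

        return s;
}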
*/ if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { - r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); + r = kvm_x86_ops->check_nested_events(vcpu); if (r != 0) return r; } @@ -7683,7 +7687,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) goto out; } - if (inject_pending_event(vcpu, req_int_win) != 0) + if (inject_pending_event(vcpu) != 0) req_immediate_exit = true; else { /* Enable SMI/NMI/IRQ window open exits if needed. @@ -7894,7 +7898,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) - kvm_x86_ops->check_nested_events(vcpu, false); + kvm_x86_ops->check_nested_events(vcpu); return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted); @@ -8634,6 +8638,9 @@ static void fx_init(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask; + struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache; + + kvm_release_pfn(cache->pfn, cache->dirty, cache); kvmclock_reset(vcpu); @@ -9229,6 +9236,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, { int i; + /* + * Clear out the previous array pointers for the KVM_MR_MOVE case. The + * old arrays will be freed by __kvm_set_memory_region() if installing + * the new memslot is successful. + */ + memset(&slot->arch, 0, sizeof(slot->arch)); + for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { struct kvm_lpage_info *linfo; unsigned long ugfn; @@ -9291,11 +9305,18 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) { + struct kvm_vcpu *vcpu; + int i; + /* * memslots->generation has been incremented. * mmio generation may have reached its maximum value. */ kvm_mmu_invalidate_mmio_sptes(kvm, gen); + + /* Force re-initialization of steal_time cache */ + kvm_for_each_vcpu(i, vcpu, kvm) + kvm_vcpu_kick(vcpu); } int kvm_arch_prepare_memory_region(struct kvm *kvm, @@ -9303,6 +9324,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { + if (change == KVM_MR_MOVE) + return kvm_arch_create_memslot(kvm, memslot, + mem->memory_size >> PAGE_SHIFT); + return 0; } diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index a32fc3d99407641c9db2ef591fe0cf20805ff2fb..46ab928312517d528b0c52dbed70138941a2e8a7 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -142,6 +142,19 @@ static bool is_ereg(u32 reg) BIT(BPF_REG_AX)); } +/* + * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64 + * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte + * of encoding. al,cl,dl,bl have simpler encoding. 
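[Editorial note] is_ereg_8l() above turns a small set-membership question into a single AND against a mask of register numbers whose 8-bit forms need the extra encoding byte. A stand-alone sketch of that pattern (the enum values here are made up, not the real BPF register numbering):

#include <stdbool.h>
#include <stdio.h>

#define BIT(n)  (1u << (n))

enum { REG_0, REG_1, REG_2, REG_FP = 10 };

/* True if 8-bit access to 'reg' needs the extra encoding byte. */
static bool needs_extra_byte(unsigned int reg)
{
        return (BIT(reg) & (BIT(REG_1) | BIT(REG_2) | BIT(REG_FP))) != 0;
}

int main(void)
{
        printf("%d %d\n", needs_extra_byte(REG_2), needs_extra_byte(REG_0)); /* 1 0 */
        return 0;
}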
+ */ +static bool is_ereg_8l(u32 reg) +{ + return is_ereg(reg) || + (1 << reg) & (BIT(BPF_REG_1) | + BIT(BPF_REG_2) | + BIT(BPF_REG_FP)); +} + static bool is_axreg(u32 reg) { return reg == BPF_REG_0; @@ -751,9 +764,8 @@ st: if (is_imm8(insn->off)) /* STX: *(u8*)(dst_reg + off) = src_reg */ case BPF_STX | BPF_MEM | BPF_B: /* Emit 'mov byte ptr [rax + off], al' */ - if (is_ereg(dst_reg) || is_ereg(src_reg) || - /* We have to add extra byte for x86 SIL, DIL regs */ - src_reg == BPF_REG_1 || src_reg == BPF_REG_2) + if (is_ereg(dst_reg) || is_ereg_8l(src_reg)) + /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */ EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); else EMIT1(0x88); diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 24d573bc550d9885537a19ab8582cc95a988f6ce..2eaf1900ba673942cafae69b59b92a7d1326ac7a 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -1830,7 +1830,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, STACK_VAR(dst_hi)); EMIT(0x0, 4); } else { - EMIT3(0xC7, add_1reg(0xC0, dst_hi), 0); + /* xor dst_hi,dst_hi */ + EMIT2(0x33, + add_2reg(0xC0, dst_hi, dst_hi)); } break; case BPF_DW: @@ -1968,8 +1970,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, goto emit_cond_jmp_signed; } case BPF_JMP | BPF_JSET | BPF_X: { - u8 dreg_lo = dstk ? IA32_EAX : dst_lo; - u8 dreg_hi = dstk ? IA32_EDX : dst_hi; + u8 dreg_lo = IA32_EAX; + u8 dreg_hi = IA32_EDX; u8 sreg_lo = sstk ? IA32_ECX : src_lo; u8 sreg_hi = sstk ? IA32_EBX : src_hi; @@ -1978,6 +1980,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, STACK_VAR(dst_lo)); EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(dst_hi)); + } else { + /* mov dreg_lo,dst_lo */ + EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo)); + /* mov dreg_hi,dst_hi */ + EMIT2(0x89, + add_2reg(0xC0, dreg_hi, dst_hi)); } if (sstk) { @@ -1996,8 +2004,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, } case BPF_JMP | BPF_JSET | BPF_K: { u32 hi; - u8 dreg_lo = dstk ? IA32_EAX : dst_lo; - u8 dreg_hi = dstk ? IA32_EDX : dst_hi; + u8 dreg_lo = IA32_EAX; + u8 dreg_hi = IA32_EDX; u8 sreg_lo = IA32_ECX; u8 sreg_hi = IA32_EBX; @@ -2006,6 +2014,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, STACK_VAR(dst_lo)); EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(dst_hi)); + } else { + /* mov dreg_lo,dst_lo */ + EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo)); + /* mov dreg_hi,dst_hi */ + EMIT2(0x89, + add_2reg(0xC0, dreg_hi, dst_hi)); } hi = imm32 & (1<<31) ? 
(u32)~0 : 0; diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 2a9a703ef4a07ea2759344e5f5cefc805fb16b8c..52dd59af873ee0b1626abca7c1e5455c0b2c22e8 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -833,7 +833,7 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor, phys_vendor = virt_to_phys_or_null(vnd); phys_data = virt_to_phys_or_null_size(data, data_size); - if (!phys_name || !phys_data) + if (!phys_name || (data && !phys_data)) status = EFI_INVALID_PARAMETER; else status = efi_thunk(set_variable, phys_name, phys_vendor, @@ -864,7 +864,7 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor, phys_vendor = virt_to_phys_or_null(vnd); phys_data = virt_to_phys_or_null_size(data, data_size); - if (!phys_name || !phys_data) + if (!phys_name || (data && !phys_data)) status = EFI_INVALID_PARAMETER; else status = efi_thunk(set_variable, phys_name, phys_vendor, diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 66b1ebc21ce4f10b7178e4964d35f2414952d6f2..5198ed1b36690a31ef44541e56db5adb597e8c56 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -5156,20 +5156,28 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) return bfqq; } -static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) +static void +bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq) { - struct bfq_data *bfqd = bfqq->bfqd; enum bfqq_expiration reason; unsigned long flags; spin_lock_irqsave(&bfqd->lock, flags); - bfq_clear_bfqq_wait_request(bfqq); + /* + * Considering that bfqq may be in race, we should firstly check + * whether bfqq is in service before doing something on it. If + * the bfqq in race is not in service, it has already been expired + * through __bfq_bfqq_expire func and its wait_request flags has + * been cleared in __bfq_bfqd_reset_in_service func. + */ if (bfqq != bfqd->in_service_queue) { spin_unlock_irqrestore(&bfqd->lock, flags); return; } + bfq_clear_bfqq_wait_request(bfqq); + if (bfq_bfqq_budget_timeout(bfqq)) /* * Also here the queue can be safely expired @@ -5214,7 +5222,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) * early. */ if (bfqq) - bfq_idle_slice_timer_body(bfqq); + bfq_idle_slice_timer_body(bfqd, bfqq); return HRTIMER_NORESTART; } diff --git a/block/blk-crypto.c b/block/blk-crypto.c index f56bbec1132f515ab224b1d5a49f286e67bb4a0e..e07a37cf8b5fbe2ec696e1027c17788fdfa7d0a0 100644 --- a/block/blk-crypto.c +++ b/block/blk-crypto.c @@ -108,9 +108,10 @@ int blk_crypto_submit_bio(struct bio **bio_ptr) /* Get device keyslot if supported */ if (keyslot_manager_crypto_mode_supported(q->ksm, - bc->bc_key->crypto_mode, - bc->bc_key->data_unit_size, - bc->bc_key->is_hw_wrapped)) { + bc->bc_key->crypto_mode, + blk_crypto_key_dun_bytes(bc->bc_key), + bc->bc_key->data_unit_size, + bc->bc_key->is_hw_wrapped)) { err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm); if (!err) return 0; @@ -180,6 +181,8 @@ bool blk_crypto_endio(struct bio *bio) * @is_hw_wrapped has to be set for such keys) * @is_hw_wrapped: Denotes @raw_key is wrapped. * @crypto_mode: identifier for the encryption algorithm to use + * @dun_bytes: number of bytes that will be used to specify the DUN when this + * key is used * @data_unit_size: the data unit size to use for en/decryption * * Return: The blk_crypto_key that was prepared, or an ERR_PTR() on error. 
When @@ -189,10 +192,12 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key, unsigned int raw_key_size, bool is_hw_wrapped, enum blk_crypto_mode_num crypto_mode, + unsigned int dun_bytes, unsigned int data_unit_size) { const struct blk_crypto_mode *mode; static siphash_key_t hash_key; + u32 hash; memset(blk_key, 0, sizeof(*blk_key)); @@ -211,6 +216,9 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, return -EINVAL; } + if (dun_bytes <= 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE) + return -EINVAL; + if (!is_power_of_2(data_unit_size)) return -EINVAL; @@ -227,7 +235,8 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, * precomputed here so that it only needs to be computed once per key. */ get_random_once(&hash_key, sizeof(hash_key)); - blk_key->hash = siphash(raw_key, raw_key_size, &hash_key); + hash = (u32)siphash(raw_key, raw_key_size, &hash_key); + blk_crypto_key_set_hash_and_dun_bytes(blk_key, hash, dun_bytes); return 0; } @@ -236,6 +245,7 @@ EXPORT_SYMBOL_GPL(blk_crypto_init_key); /** * blk_crypto_start_using_mode() - Start using blk-crypto on a device * @crypto_mode: the crypto mode that will be used + * @dun_bytes: number of bytes that will be used to specify the DUN * @data_unit_size: the data unit size that will be used * @is_hw_wrapped_key: whether the key will be hardware-wrapped * @q: the request queue for the device @@ -249,12 +259,13 @@ EXPORT_SYMBOL_GPL(blk_crypto_init_key); * algorithm is disabled in the crypto API; or another -errno code. */ int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode, + unsigned int dun_bytes, unsigned int data_unit_size, bool is_hw_wrapped_key, struct request_queue *q) { if (keyslot_manager_crypto_mode_supported(q->ksm, crypto_mode, - data_unit_size, + dun_bytes, data_unit_size, is_hw_wrapped_key)) return 0; if (is_hw_wrapped_key) { @@ -285,6 +296,7 @@ int blk_crypto_evict_key(struct request_queue *q, { if (q->ksm && keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode, + blk_crypto_key_dun_bytes(key), key->data_unit_size, key->is_hw_wrapped)) return keyslot_manager_evict_key(q->ksm, key); diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 01580f88fcb39fd68f04253fb216d859dc2577e5..4c810969c3e2fb77657b895e740e1796a4e6c79e 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -87,6 +87,7 @@ static void ioc_destroy_icq(struct io_cq *icq) * making it impossible to determine icq_cache. Record it in @icq. 
*/ icq->__rcu_icq_cache = et->icq_cache; + icq->flags |= ICQ_DESTROYED; call_rcu(&icq->__rcu_head, icq_free_icq_rcu); } @@ -230,15 +231,21 @@ static void __ioc_clear_queue(struct list_head *icq_list) { unsigned long flags; + rcu_read_lock(); while (!list_empty(icq_list)) { struct io_cq *icq = list_entry(icq_list->next, struct io_cq, q_node); struct io_context *ioc = icq->ioc; spin_lock_irqsave(&ioc->lock, flags); + if (icq->flags & ICQ_DESTROYED) { + spin_unlock_irqrestore(&ioc->lock, flags); + continue; + } ioc_destroy_icq(icq); spin_unlock_irqrestore(&ioc->lock, flags); } + rcu_read_unlock(); } /** diff --git a/block/blk-settings.c b/block/blk-settings.c index be9b39caadbd23bdd284ab8f035bb19ba2620391..01093b8f3e624f0a1862a532820f698728e072c3 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -717,6 +717,9 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", top, bottom); } + + t->backing_dev_info->io_pages = + t->limits.max_sectors >> (PAGE_SHIFT - 9); } EXPORT_SYMBOL(disk_stack_limits); diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c index d1dbac6e1a460d860d725f35e260d5745d747972..74b7485187e11e88eb08c08c04ac3aac19d0adab 100644 --- a/block/keyslot-manager.c +++ b/block/keyslot-manager.c @@ -45,6 +45,7 @@ struct keyslot_manager { struct keyslot_mgmt_ll_ops ksm_ll_ops; unsigned int features; unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX]; + unsigned int max_dun_bytes_supported; void *ll_priv_data; #ifdef CONFIG_PM @@ -182,6 +183,7 @@ struct keyslot_manager *keyslot_manager_create( ksm->features = features; memcpy(ksm->crypto_mode_supported, crypto_mode_supported, sizeof(ksm->crypto_mode_supported)); + ksm->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE; ksm->ll_priv_data = ll_priv_data; keyslot_manager_set_dev(ksm, dev); @@ -214,11 +216,19 @@ struct keyslot_manager *keyslot_manager_create( } EXPORT_SYMBOL_GPL(keyslot_manager_create); +void keyslot_manager_set_max_dun_bytes(struct keyslot_manager *ksm, + unsigned int max_dun_bytes) +{ + ksm->max_dun_bytes_supported = max_dun_bytes; +} +EXPORT_SYMBOL_GPL(keyslot_manager_set_max_dun_bytes); + static inline struct hlist_head * hash_bucket_for_key(struct keyslot_manager *ksm, const struct blk_crypto_key *key) { - return &ksm->slot_hashtable[key->hash & (ksm->slot_hashtable_size - 1)]; + return &ksm->slot_hashtable[blk_crypto_key_hash(key) & + (ksm->slot_hashtable_size - 1)]; } static void remove_slot_from_lru_list(struct keyslot_manager *ksm, int slot) @@ -391,6 +401,7 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot) * combination is supported by a ksm. * @ksm: The keyslot manager to check * @crypto_mode: The crypto mode to check for. + * @dun_bytes: The number of bytes that will be used to specify the DUN * @data_unit_size: The data_unit_size for the mode. * @is_hw_wrapped_key: Whether a hardware-wrapped key will be used. 
* @@ -402,6 +413,7 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot) */ bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, enum blk_crypto_mode_num crypto_mode, + unsigned int dun_bytes, unsigned int data_unit_size, bool is_hw_wrapped_key) { @@ -418,7 +430,10 @@ bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, if (!(ksm->features & BLK_CRYPTO_FEATURE_STANDARD_KEYS)) return false; } - return ksm->crypto_mode_supported[crypto_mode] & data_unit_size; + if (!(ksm->crypto_mode_supported[crypto_mode] & data_unit_size)) + return false; + + return ksm->max_dun_bytes_supported >= dun_bytes; } /** @@ -565,6 +580,7 @@ struct keyslot_manager *keyslot_manager_create_passthrough( ksm->features = features; memcpy(ksm->crypto_mode_supported, crypto_mode_supported, sizeof(ksm->crypto_mode_supported)); + ksm->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE; ksm->ll_priv_data = ll_priv_data; keyslot_manager_set_dev(ksm, dev); @@ -592,12 +608,16 @@ void keyslot_manager_intersect_modes(struct keyslot_manager *parent, unsigned int i; parent->features &= child->features; + parent->max_dun_bytes_supported = + min(parent->max_dun_bytes_supported, + child->max_dun_bytes_supported); for (i = 0; i < ARRAY_SIZE(child->crypto_mode_supported); i++) { parent->crypto_mode_supported[i] &= child->crypto_mode_supported[i]; } } else { parent->features = 0; + parent->max_dun_bytes_supported = 0; memset(parent->crypto_mode_supported, 0, sizeof(parent->crypto_mode_supported)); } diff --git a/build.config.aarch64 b/build.config.aarch64 index 523bbc0449f75f17fcdcaf78f3c4b4fcb225e3a0..7eabc9652bf507bd1d4938868134e35d9e9ecbe3 100644 --- a/build.config.aarch64 +++ b/build.config.aarch64 @@ -2,7 +2,9 @@ ARCH=arm64 CLANG_TRIPLE=aarch64-linux-gnu- CROSS_COMPILE=aarch64-linux-androidkernel- +CROSS_COMPILE_COMPAT=arm-linux-androideabi- LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin +LINUX_GCC_CROSS_COMPILE_COMPAT_PREBUILTS_BIN=prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin/ FILES=" arch/arm64/boot/Image.gz diff --git a/build.config.allmodconfig b/build.config.allmodconfig index 29a32b609a9768fe597fd479d408e82c7bfbeaf6..4ce14e5a7c5a42f98f6d2aef188a5158d3a7dc84 100644 --- a/build.config.allmodconfig +++ b/build.config.allmodconfig @@ -1,11 +1,9 @@ DEFCONFIG=allmodconfig -# XFS_FS is currently broken on this branch with clang-9 POST_DEFCONFIG_CMDS="update_config" function update_config() { ${KERNEL_DIR}/scripts/config --file ${OUT_DIR}/.config \ -d TEST_KMOD \ - -d XFS_FS \ -d CPU_BIG_ENDIAN \ -d STM \ -d TEST_MEMCAT_P \ diff --git a/build.config.common b/build.config.common index 02d051d5c51f90e3f8cb6acff311f4609ca0a42b..5b0ddc2d9a3b0718ba3e4a449f37f1e5c5eabdaf 100644 --- a/build.config.common +++ b/build.config.common @@ -6,6 +6,7 @@ LD=ld.lld NM=llvm-nm OBJCOPY=llvm-objcopy CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r377782c/bin +BUILDTOOLS_PREBUILT_BIN=build/build-tools/path/linux-x86 EXTRA_CMDS='' STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.gki.aarch64 b/build.config.gki.aarch64 index 03a98c665fc982cdd671ce67adacb4141ee7b088..8ceaaff0f7b3b3ad34afc852eee9b0fa1f8f7936 100644 --- a/build.config.gki.aarch64 +++ b/build.config.gki.aarch64 @@ -9,3 +9,4 @@ abi_gki_aarch64_cuttlefish_whitelist abi_gki_aarch64_qcom_whitelist " TRIM_NONLISTED_KMI=1 +KMI_WHITELIST_STRICT_MODE=1 diff --git a/drivers/Kconfig b/drivers/Kconfig index 
681051a96e6796efb83ea8f281f8da34e38c6860..d9b8512504f9f9abbed960198aa8da58fa117457 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -195,6 +195,8 @@ source "drivers/thunderbolt/Kconfig" source "drivers/android/Kconfig" +source "drivers/gpu/trace/Kconfig" + source "drivers/nvdimm/Kconfig" source "drivers/dax/Kconfig" diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 19f5f7cc6311ae4d0a08135e6f79982ed8889cff..0091587cd135c39425ac4bbdd16bf2beb6a81c2b 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -227,13 +227,13 @@ int acpi_device_set_power(struct acpi_device *device, int state) end: if (result) { dev_warn(&device->dev, "Failed to change power state to %s\n", - acpi_power_state_string(state)); + acpi_power_state_string(target_state)); } else { device->power.state = target_state; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] transitioned to %s\n", device->pnp.bus_id, - acpi_power_state_string(state))); + acpi_power_state_string(target_state))); } return result; diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index fbc936cf2025c7eaa1d3a51d5463dcafabb3894c..62c0fe9ef41240cb79fba1c557b7e4f9487d451f 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -910,13 +910,6 @@ static long __acpi_processor_get_throttling(void *data) return pr->throttling.acpi_processor_get_throttling(pr); } -static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct) -{ - if (direct || (is_percpu_thread() && cpu == smp_processor_id())) - return fn(arg); - return work_on_cpu(cpu, fn, arg); -} - static int acpi_processor_get_throttling(struct acpi_processor *pr) { if (!pr) diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index deb37977cab9c12210e1afcced3768c843fed9fe..215144c1989239cdd9bc64b7a3327b4ec556b2da 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -938,8 +938,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item, mm = alloc->vma_vm_mm; if (!mmget_not_zero(mm)) goto err_mmget; - if (!down_write_trylock(&mm->mmap_sem)) - goto err_down_write_mmap_sem_failed; + if (!down_read_trylock(&mm->mmap_sem)) + goto err_down_read_mmap_sem_failed; vma = binder_alloc_get_vma(alloc); list_lru_isolate(lru, item); @@ -952,7 +952,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, trace_binder_unmap_user_end(alloc, index); } - up_write(&mm->mmap_sem); + up_read(&mm->mmap_sem); mmput(mm); trace_binder_unmap_kernel_start(alloc, index); @@ -966,7 +966,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, mutex_unlock(&alloc->mutex); return LRU_REMOVED_RETRY; -err_down_write_mmap_sem_failed: +err_down_read_mmap_sem_failed: mmput_async(mm); err_mmget: err_page_already_freed: diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 2ae1799f49927231f16cf49cbfb872607c128ea3..51eeaea65833d19c73c2d9b0dce0b108ab1780d5 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -764,6 +764,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, if (dev->flags & ATA_DFLAG_DETACH) { detach = 1; + rc = -ENODEV; goto fail; } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 3a64fa4aaf7e34257a30b0141b40b589fbbd3005..0c1572a1cc5ed0ae4ef27b4bf8b29a71a75c0ab5 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -4570,22 +4570,19 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) */ shost->max_host_blocked 
= 1; - rc = scsi_add_host_with_dma(ap->scsi_host, - &ap->tdev, ap->host->dev); + rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev); if (rc) - goto err_add; + goto err_alloc; } return 0; - err_add: - scsi_host_put(host->ports[i]->scsi_host); err_alloc: while (--i >= 0) { struct Scsi_Host *shost = host->ports[i]->scsi_host; + /* scsi_host_put() is in ata_devres_release() */ scsi_remove_host(shost); - scsi_host_put(shost); } return rc; } diff --git a/drivers/base/core.c b/drivers/base/core.c index af196f1558ee5f5887d78dbd717c12f25f9e621a..35a5e3f374240a7654399970a300ce432eb93f40 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2939,6 +2939,7 @@ int device_online(struct device *dev) return ret; } +EXPORT_SYMBOL_GPL(device_online); struct root_device { struct device dev; diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c index 818d8c37d70a9b7dbf7d93fb1383574bcc275c00..3b7b748c4d4f244e1f210e3d5ee391da34bca265 100644 --- a/drivers/base/firmware_loader/fallback.c +++ b/drivers/base/firmware_loader/fallback.c @@ -572,7 +572,7 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, } retval = fw_sysfs_wait_timeout(fw_priv, timeout); - if (retval < 0) { + if (retval < 0 && retval != -ENOENT) { mutex_lock(&fw_lock); fw_load_abort(fw_sysfs); mutex_unlock(&fw_lock); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index feb97e7bdbad4d0ed777a6ae0904c0e656469eb7..45f8b1f354ef36e5131d5020a7be08ad5568e11c 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -428,11 +428,12 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos, * information. */ struct file *file = lo->lo_backing_file; + struct request_queue *q = lo->lo_queue; int ret; mode |= FALLOC_FL_KEEP_SIZE; - if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) { + if (!blk_queue_discard(q)) { ret = -EOPNOTSUPP; goto out; } @@ -866,28 +867,47 @@ static void loop_config_discard(struct loop_device *lo) struct inode *inode = file->f_mapping->host; struct request_queue *q = lo->lo_queue; + /* + * If the backing device is a block device, mirror its zeroing + * capability. Set the discard sectors to the block device's zeroing + * capabilities because loop discards result in blkdev_issue_zeroout(), + * not blkdev_issue_discard(). This maintains consistent behavior with + * file-backed loop devices: discarded regions read back as zero. + */ + if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) { + struct request_queue *backingq; + + backingq = bdev_get_queue(inode->i_bdev); + blk_queue_max_discard_sectors(q, + backingq->limits.max_write_zeroes_sectors); + + blk_queue_max_write_zeroes_sectors(q, + backingq->limits.max_write_zeroes_sectors); + /* * We use punch hole to reclaim the free space used by the * image a.k.a. discard. However we do not support discard if * encryption is enabled, because it may give an attacker * useful information. 
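
The reworked ata_scsi_add_hosts() error path above is the usual partial-initialisation unwind: on failure, roll back only the ports that were already added and let devres handle the final put. A generic sketch of that idiom; struct demo_item, demo_setup_one() and demo_teardown_one() are placeholders, not libata APIs:

	#include <linux/types.h>

	struct demo_item { bool ready; };

	static int demo_setup_one(struct demo_item *it)
	{
		it->ready = true;
		return 0;
	}

	static void demo_teardown_one(struct demo_item *it)
	{
		it->ready = false;
	}

	static int demo_setup_all(struct demo_item *items, int count)
	{
		int i, rc;

		for (i = 0; i < count; i++) {
			rc = demo_setup_one(&items[i]);
			if (rc)
				goto unwind;
		}
		return 0;

	unwind:
		/* tear down only the items that were successfully set up */
		while (--i >= 0)
			demo_teardown_one(&items[i]);
		return rc;
	}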
*/ - if ((!file->f_op->fallocate) || - lo->lo_encrypt_key_size) { + } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) { q->limits.discard_granularity = 0; q->limits.discard_alignment = 0; blk_queue_max_discard_sectors(q, 0); blk_queue_max_write_zeroes_sectors(q, 0); - blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); - return; - } - q->limits.discard_granularity = inode->i_sb->s_blocksize; - q->limits.discard_alignment = 0; + } else { + q->limits.discard_granularity = inode->i_sb->s_blocksize; + q->limits.discard_alignment = 0; - blk_queue_max_discard_sectors(q, UINT_MAX >> 9); - blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); - blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); + blk_queue_max_discard_sectors(q, UINT_MAX >> 9); + blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); + } + + if (q->limits.max_write_zeroes_sectors) + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); + else + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); } static void loop_unprepare_queue(struct loop_device *lo) @@ -976,6 +996,16 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) blk_queue_write_cache(lo->lo_queue, true, false); + if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) { + /* In case of direct I/O, match underlying block size */ + unsigned short bsize = bdev_logical_block_size( + inode->i_sb->s_bdev); + + blk_queue_logical_block_size(lo->lo_queue, bsize); + blk_queue_physical_block_size(lo->lo_queue, bsize); + blk_queue_io_min(lo->lo_queue, bsize); + } + loop_update_dio(lo); set_capacity(lo->lo_disk, size); bd_set_size(bdev, size << 9); diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index c5c0b7c89481555074bda218c8a545a2539d1ee4..d2d7dc9cd58d21fcfb897af6f97a50843fcffc63 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -571,6 +571,7 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) if (tag != -1U) { cmd = &nq->cmds[tag]; cmd->tag = tag; + cmd->error = BLK_STS_OK; cmd->nq = nq; if (nq->dev->irqmode == NULL_IRQ_TIMER) { hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, @@ -1433,6 +1434,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, cmd->timer.function = null_cmd_timer_expired; } cmd->rq = bd->rq; + cmd->error = BLK_STS_OK; cmd->nq = nq; blk_mq_start_request(bd->rq); @@ -1480,7 +1482,12 @@ static void cleanup_queues(struct nullb *nullb) static void null_del_dev(struct nullb *nullb) { - struct nullb_device *dev = nullb->dev; + struct nullb_device *dev; + + if (!nullb) + return; + + dev = nullb->dev; ida_simple_remove(&nullb_indexes, nullb->index); @@ -1844,6 +1851,7 @@ static int null_add_dev(struct nullb_device *dev) cleanup_queues(nullb); out_free_nullb: kfree(nullb); + dev->nullb = NULL; out: return rv; } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index d3ad1b8c133e6f39190a5effdf2c8d984fa6eaae..1101290971699fb77059f29c5b8a3c69960aecc5 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3427,6 +3427,10 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev) cancel_work_sync(&rbd_dev->unlock_work); } +/* + * header_rwsem must not be held to avoid a deadlock with + * rbd_dev_refresh() when flushing notifies. 
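
The rewritten loop_config_discard() above keys QUEUE_FLAG_DISCARD off the resulting write-zeroes limit, so block-backed and file-backed loop devices end up with consistent behaviour. A condensed sketch of that decision; the parameters (backingq may be NULL for file-backed devices, can_punch_hole stands in for the fallocate/encryption checks) are assumptions of this example:

	#include <linux/blkdev.h>

	static void demo_config_discard(struct request_queue *q,
					struct request_queue *backingq,
					unsigned int blocksize, bool can_punch_hole)
	{
		if (backingq) {
			/* block-backed: mirror the backing device's zeroing limit */
			blk_queue_max_discard_sectors(q,
				backingq->limits.max_write_zeroes_sectors);
			blk_queue_max_write_zeroes_sectors(q,
				backingq->limits.max_write_zeroes_sectors);
		} else if (!can_punch_hole) {
			blk_queue_max_discard_sectors(q, 0);
			blk_queue_max_write_zeroes_sectors(q, 0);
		} else {
			q->limits.discard_granularity = blocksize;
			blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
			blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
		}

		/* advertise discard only if something can actually be zeroed */
		if (q->limits.max_write_zeroes_sectors)
			blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
		else
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
	}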
+ */ static void rbd_unregister_watch(struct rbd_device *rbd_dev) { WARN_ON(waitqueue_active(&rbd_dev->lock_waitq)); @@ -5719,9 +5723,10 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) static void rbd_dev_image_release(struct rbd_device *rbd_dev) { - rbd_dev_unprobe(rbd_dev); if (rbd_dev->opts) rbd_unregister_watch(rbd_dev); + + rbd_dev_unprobe(rbd_dev); rbd_dev->image_format = 0; kfree(rbd_dev->spec->image_id); rbd_dev->spec->image_id = NULL; @@ -5732,6 +5737,9 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev) * device. If this image is the one being mapped (i.e., not a * parent), initiate a watch on its header object before using that * object to get detailed information about the rbd image. + * + * On success, returns with header_rwsem held for write if called + * with @depth == 0. */ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) { @@ -5764,9 +5772,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) } } + if (!depth) + down_write(&rbd_dev->header_rwsem); + ret = rbd_dev_header_info(rbd_dev); if (ret) - goto err_out_watch; + goto err_out_probe; /* * If this image is the one being mapped, we have pool name and @@ -5812,10 +5823,11 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) return 0; err_out_probe: - rbd_dev_unprobe(rbd_dev); -err_out_watch: + if (!depth) + up_write(&rbd_dev->header_rwsem); if (!depth) rbd_unregister_watch(rbd_dev); + rbd_dev_unprobe(rbd_dev); err_out_format: rbd_dev->image_format = 0; kfree(rbd_dev->spec->image_id); @@ -5872,12 +5884,9 @@ static ssize_t do_rbd_add(struct bus_type *bus, goto err_out_rbd_dev; } - down_write(&rbd_dev->header_rwsem); rc = rbd_dev_image_probe(rbd_dev, 0); - if (rc < 0) { - up_write(&rbd_dev->header_rwsem); + if (rc < 0) goto err_out_rbd_dev; - } /* If we are mapping a snapshot it must be marked read-only */ if (rbd_dev->spec->snap_id != CEPH_NOSNAP) diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 728c9a9609f0c658aff302f76e322a560ef83764..9a3c2b14ac378146bfdbfde86481c61191b62ac1 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -277,9 +277,14 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, if (err == -ENOSPC) blk_mq_stop_hw_queue(hctx); spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); - if (err == -ENOMEM || err == -ENOSPC) + switch (err) { + case -ENOSPC: return BLK_STS_DEV_RESOURCE; - return BLK_STS_IOERR; + case -ENOMEM: + return BLK_STS_RESOURCE; + default: + return BLK_STS_IOERR; + } } if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 9a57af79f330c1210e305553d6c86f2a17b6fc34..adc0e3ed01c24f6745ece8e56480410dabf81ba3 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include @@ -2188,10 +2189,12 @@ static void blkfront_setup_discard(struct blkfront_info *info) static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) { - unsigned int psegs, grants; + unsigned int psegs, grants, memflags; int err, i; struct blkfront_info *info = rinfo->dev_info; + memflags = memalloc_noio_save(); + if (info->max_indirect_segments == 0) { if (!HAS_EXTRA_REQ) grants = BLKIF_MAX_SEGMENTS_PER_REQUEST; @@ -2223,7 +2226,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) BUG_ON(!list_empty(&rinfo->indirect_pages)); for (i = 0; i < num; i++) { - struct page *indirect_page = 
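
The virtio_blk hunk above separates -ENOSPC (ring full, restart when the device frees space) from -ENOMEM (host memory shortage, back off and retry) when converting to block-layer status codes. A tiny sketch of that mapping:

	#include <linux/blk_types.h>
	#include <linux/errno.h>

	static blk_status_t demo_errno_to_blk_status(int err)
	{
		switch (err) {
		case 0:
			return BLK_STS_OK;
		case -ENOSPC:		/* queue full: driver restarts the hw queue */
			return BLK_STS_DEV_RESOURCE;
		case -ENOMEM:		/* transient host OOM: block layer retries later */
			return BLK_STS_RESOURCE;
		default:
			return BLK_STS_IOERR;
		}
	}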
alloc_page(GFP_NOIO); + struct page *indirect_page = alloc_page(GFP_KERNEL); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &rinfo->indirect_pages); @@ -2234,15 +2237,15 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) rinfo->shadow[i].grants_used = kvcalloc(grants, sizeof(rinfo->shadow[i].grants_used[0]), - GFP_NOIO); + GFP_KERNEL); rinfo->shadow[i].sg = kvcalloc(psegs, sizeof(rinfo->shadow[i].sg[0]), - GFP_NOIO); + GFP_KERNEL); if (info->max_indirect_segments) rinfo->shadow[i].indirect_grants = kvcalloc(INDIRECT_GREFS(grants), sizeof(rinfo->shadow[i].indirect_grants[0]), - GFP_NOIO); + GFP_KERNEL); if ((rinfo->shadow[i].grants_used == NULL) || (rinfo->shadow[i].sg == NULL) || (info->max_indirect_segments && @@ -2251,6 +2254,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) sg_init_table(rinfo->shadow[i].sg, psegs); } + memalloc_noio_restore(memflags); return 0; @@ -2270,6 +2274,9 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) __free_page(indirect_page); } } + + memalloc_noio_restore(memflags); + return -ENOMEM; } diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 1b76d958590275daa6b9c2f8c69ecd2d51655da7..2ca2cc56bcef6f849c98d465a59e43ad62214a10 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -345,7 +345,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr, if (ret) goto unlock; - *buf = readl(rsb->regs + RSB_DATA); + *buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0); unlock: mutex_unlock(&rsb->lock); diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c index 14730be54edf64901d2e4f9a26522ece8131d2e5..dc9b8f377907f0e32298bb4686bfc4fe57501f3c 100644 --- a/drivers/char/hw_random/imx-rngc.c +++ b/drivers/char/hw_random/imx-rngc.c @@ -111,8 +111,10 @@ static int imx_rngc_self_test(struct imx_rngc *rngc) return -ETIMEDOUT; } - if (rngc->err_reg != 0) + if (rngc->err_reg != 0) { + imx_rngc_irq_mask_clear(rngc); return -EIO; + } return 0; } diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 980eb7c609528a99503a3d90ce7f8f5dc06e071f..69734b1df79272cc459b7ccb9bac183f2807bfce 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -3134,8 +3134,8 @@ static void __get_guid(struct ipmi_smi *intf) if (rv) /* Send failed, no GUID available. */ bmc->dyn_guid_set = 0; - - wait_event(intf->waitq, bmc->dyn_guid_set != 2); + else + wait_event(intf->waitq, bmc->dyn_guid_set != 2); /* dyn_guid_set makes the guid data available. */ smp_rmb(); diff --git a/drivers/char/random.c b/drivers/char/random.c index d0be947c68427376bacfb4a89904a70eb7e76e51..e529698cb53fc3345c2480842245ec4c43e53283 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -2146,11 +2146,11 @@ struct batched_entropy { /* * Get a random word for internal kernel use only. The quality of the random - * number is either as good as RDRAND or as good as /dev/urandom, with the - * goal of being quite fast and not depleting entropy. In order to ensure + * number is good as /dev/urandom, but there is no backtrack protection, with + * the goal of being quite fast and not depleting entropy. In order to ensure * that the randomness provided by this function is okay, the function - * wait_for_random_bytes() should be called and return 0 at least once - * at any point prior. + * wait_for_random_bytes() should be called and return 0 at least once at any + * point prior. 
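
The blkfront change above replaces scattered GFP_NOIO allocations with a memalloc_noio_save()/memalloc_noio_restore() scope, so every allocation inside the region, including ones made by callees, implicitly avoids I/O. A minimal sketch of the pattern:

	#include <linux/sched/mm.h>
	#include <linux/gfp.h>

	static struct page *demo_alloc_in_noio_scope(void)
	{
		unsigned int noio_flags;
		struct page *page;

		noio_flags = memalloc_noio_save();	/* GFP_KERNEL now acts as GFP_NOIO */
		page = alloc_page(GFP_KERNEL);
		memalloc_noio_restore(noio_flags);	/* always restore, even on failure */

		return page;				/* may be NULL */
	}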
*/ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock), @@ -2163,15 +2163,6 @@ u64 get_random_u64(void) struct batched_entropy *batch; static void *previous; -#if BITS_PER_LONG == 64 - if (arch_get_random_long((unsigned long *)&ret)) - return ret; -#else - if (arch_get_random_long((unsigned long *)&ret) && - arch_get_random_long((unsigned long *)&ret + 1)) - return ret; -#endif - warn_unseeded_randomness(&previous); batch = raw_cpu_ptr(&batched_entropy_u64); @@ -2196,9 +2187,6 @@ u32 get_random_u32(void) struct batched_entropy *batch; static void *previous; - if (arch_get_random_int(&ret)) - return ret; - warn_unseeded_randomness(&previous); batch = raw_cpu_ptr(&batched_entropy_u32); diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c index 5a8720df2b51384930220a8111e306aaf58196f9..7d70b654df04d44c974ee069407a0294a4b9da78 100644 --- a/drivers/char/tpm/eventlog/common.c +++ b/drivers/char/tpm/eventlog/common.c @@ -104,11 +104,8 @@ static int tpm_read_log(struct tpm_chip *chip) * * If an event log is found then the securityfs files are setup to * export it to userspace, otherwise nothing is done. - * - * Returns -ENODEV if the firmware has no event log or securityfs is not - * supported. */ -int tpm_bios_log_setup(struct tpm_chip *chip) +void tpm_bios_log_setup(struct tpm_chip *chip) { const char *name = dev_name(&chip->dev); unsigned int cnt; @@ -117,7 +114,7 @@ int tpm_bios_log_setup(struct tpm_chip *chip) rc = tpm_read_log(chip); if (rc < 0) - return rc; + return; log_version = rc; cnt = 0; @@ -163,13 +160,12 @@ int tpm_bios_log_setup(struct tpm_chip *chip) cnt++; } - return 0; + return; err: - rc = PTR_ERR(chip->bios_dir[cnt]); chip->bios_dir[cnt] = NULL; tpm_bios_log_teardown(chip); - return rc; + return; } void tpm_bios_log_teardown(struct tpm_chip *chip) diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c index 58c84784ba25c72020e233fd3cbe82f12945aa16..a4621c83e2bf7cd1344378a15c04e24e5e0b0290 100644 --- a/drivers/char/tpm/eventlog/tpm1.c +++ b/drivers/char/tpm/eventlog/tpm1.c @@ -129,6 +129,7 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v, u32 converted_event_size; u32 converted_event_type; + (*pos)++; converted_event_size = do_endian_conversion(event->event_size); v += sizeof(struct tcpa_event) + converted_event_size; @@ -146,7 +147,6 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v, ((v + sizeof(struct tcpa_event) + converted_event_size) >= limit)) return NULL; - (*pos)++; return v; } diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c index 41b9f6c92da73114379f2dda56b34a7c1ac89058..aec49c925cee1f4fdd02800e6c05b15fba2648ac 100644 --- a/drivers/char/tpm/eventlog/tpm2.c +++ b/drivers/char/tpm/eventlog/tpm2.c @@ -143,6 +143,7 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v, size_t event_size; void *marker; + (*pos)++; event_header = log->bios_event_log; if (v == SEQ_START_TOKEN) { @@ -167,7 +168,6 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v, if (((v + event_size) >= limit) || (event_size == 0)) return NULL; - (*pos)++; return v; } diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 0b01eb7b14e536052381f953c5c2582ca936f5d9..4946c5b37d04df37a5afc841959b8f5cc465bc99 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -463,9 +463,7 @@ int tpm_chip_register(struct tpm_chip 
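
Both TPM event-log ->next() fixes above move the (*pos)++ ahead of the end-of-log checks so the position stays in sync even when NULL is returned. A generic sketch of that seq_file convention, showing only the ->next() hook; demo_items and demo_nr_items are hypothetical backing data:

	#include <linux/kernel.h>
	#include <linux/seq_file.h>

	static int demo_items[] = { 1, 2, 3 };
	static const int demo_nr_items = ARRAY_SIZE(demo_items);

	static void *demo_seq_next(struct seq_file *m, void *v, loff_t *pos)
	{
		(*pos)++;			/* advance first, unconditionally */
		if (*pos >= demo_nr_items)
			return NULL;		/* end of iteration, index still consistent */
		return &demo_items[*pos];
	}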
*chip) tpm_sysfs_add_device(chip); - rc = tpm_bios_log_setup(chip); - if (rc != 0 && rc != -ENODEV) - return rc; + tpm_bios_log_setup(chip); tpm_add_ppi(chip); diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index f3501d05264f51f87cda0795cd111554eaa4b062..289221d653cb2fa40a279966aa024d03af5ddf7a 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -602,6 +602,6 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc, int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc, u8 *buf, size_t *bufsiz); -int tpm_bios_log_setup(struct tpm_chip *chip); +void tpm_bios_log_setup(struct tpm_chip *chip); void tpm_bios_log_teardown(struct tpm_chip *chip); #endif diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 77e47dc5aacc5705b465583d2c02f26f8b393d56..569e93e1f06ccdc978d30ee6035aa850dc2f5e72 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 IBM Corporation + * Copyright (C) 2012-2020 IBM Corporation * * Author: Ashley Lai * @@ -140,6 +140,64 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) return len; } +/** + * ibmvtpm_crq_send_init - Send a CRQ initialize message + * @ibmvtpm: vtpm device struct + * + * Return: + * 0 on success. + * Non-zero on failure. + */ +static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) +{ + int rc; + + rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD); + if (rc != H_SUCCESS) + dev_err(ibmvtpm->dev, + "%s failed rc=%d\n", __func__, rc); + + return rc; +} + +/** + * tpm_ibmvtpm_resume - Resume from suspend + * + * @dev: device struct + * + * Return: Always 0. + */ +static int tpm_ibmvtpm_resume(struct device *dev) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); + int rc = 0; + + do { + if (rc) + msleep(100); + rc = plpar_hcall_norets(H_ENABLE_CRQ, + ibmvtpm->vdev->unit_address); + } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + if (rc) { + dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc); + return rc; + } + + rc = vio_enable_interrupts(ibmvtpm->vdev); + if (rc) { + dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc); + return rc; + } + + rc = ibmvtpm_crq_send_init(ibmvtpm); + if (rc) + dev_err(dev, "Error send_init rc=%d\n", rc); + + return rc; +} + /** * tpm_ibmvtpm_send() - Send a TPM command * @chip: tpm chip struct @@ -153,6 +211,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); + bool retry = true; int rc, sig; if (!ibmvtpm->rtce_buf) { @@ -186,18 +245,27 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) */ ibmvtpm->tpm_processing_cmd = true; +again: rc = ibmvtpm_send_crq(ibmvtpm->vdev, IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND, count, ibmvtpm->rtce_dma_handle); if (rc != H_SUCCESS) { + /* + * H_CLOSED can be returned after LPM resume. Call + * tpm_ibmvtpm_resume() to re-enable the CRQ then retry + * ibmvtpm_send_crq() once before failing. 
+ */ + if (rc == H_CLOSED && retry) { + tpm_ibmvtpm_resume(ibmvtpm->dev); + retry = false; + goto again; + } dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); - rc = 0; ibmvtpm->tpm_processing_cmd = false; - } else - rc = 0; + } spin_unlock(&ibmvtpm->rtce_lock); - return rc; + return 0; } static void tpm_ibmvtpm_cancel(struct tpm_chip *chip) @@ -275,26 +343,6 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm) return rc; } -/** - * ibmvtpm_crq_send_init - Send a CRQ initialize message - * @ibmvtpm: vtpm device struct - * - * Return: - * 0 on success. - * Non-zero on failure. - */ -static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) -{ - int rc; - - rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD); - if (rc != H_SUCCESS) - dev_err(ibmvtpm->dev, - "ibmvtpm_crq_send_init failed rc=%d\n", rc); - - return rc; -} - /** * tpm_ibmvtpm_remove - ibm vtpm remove entry point * @vdev: vio device struct @@ -407,44 +455,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm) ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE); } -/** - * tpm_ibmvtpm_resume - Resume from suspend - * - * @dev: device struct - * - * Return: Always 0. - */ -static int tpm_ibmvtpm_resume(struct device *dev) -{ - struct tpm_chip *chip = dev_get_drvdata(dev); - struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); - int rc = 0; - - do { - if (rc) - msleep(100); - rc = plpar_hcall_norets(H_ENABLE_CRQ, - ibmvtpm->vdev->unit_address); - } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); - - if (rc) { - dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc); - return rc; - } - - rc = vio_enable_interrupts(ibmvtpm->vdev); - if (rc) { - dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc); - return rc; - } - - rc = ibmvtpm_crq_send_init(ibmvtpm); - if (rc) - dev_err(dev, "Error send_init rc=%d\n", rc); - - return rc; -} - static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status) { return (status == 0); diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 0eaea3a7b8f425dbd2d2ef1694649ef883d3dbc6..5d8f8f018984d9fe5c765fd35ac19a645eba6752 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -437,6 +437,9 @@ static void disable_interrupts(struct tpm_chip *chip) u32 intmask; int rc; + if (priv->irq == 0) + return; + rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); if (rc < 0) intmask = 0; @@ -984,9 +987,12 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, if (irq) { tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, irq); - if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) + if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { dev_err(&chip->dev, FW_BUG "TPM interrupt not working, polling instead\n"); + + disable_interrupts(chip); + } } else { tpm_tis_probe_irq(chip, intmask); } diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c index 58f024c65e0909892ca5b2b2c7310bdbcfe8cc95..0611cf5caab3bd8509b0bc828d6adb321ef0abd4 100644 --- a/drivers/clk/at91/clk-usb.c +++ b/drivers/clk/at91/clk-usb.c @@ -78,6 +78,9 @@ static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw, tmp_parent_rate = req->rate * div; tmp_parent_rate = clk_hw_round_rate(parent, tmp_parent_rate); + if (!tmp_parent_rate) + continue; + tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div); if (tmp_rate < req->rate) tmp_diff = req->rate - tmp_rate; diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c index 
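
The ibmvtpm send path above retries exactly once after re-initialising the transport when the hypervisor reports the queue closed. A stripped-down sketch of that retry-once shape; struct demo_dev, demo_send(), demo_reinit() and DEMO_ERR_CLOSED are placeholders, not the vTPM API:

	#include <linux/errno.h>
	#include <linux/types.h>

	#define DEMO_ERR_CLOSED		(-ENOTCONN)

	struct demo_dev { int up; };

	static int demo_send(struct demo_dev *dev, const void *buf, size_t len)
	{
		return dev->up ? 0 : DEMO_ERR_CLOSED;
	}

	static void demo_reinit(struct demo_dev *dev)
	{
		dev->up = 1;		/* bring the channel back up */
	}

	static int demo_send_with_reinit(struct demo_dev *dev,
					 const void *buf, size_t len)
	{
		bool retried = false;
		int rc;

	again:
		rc = demo_send(dev, buf, len);
		if (rc == DEMO_ERR_CLOSED && !retried) {
			retried = true;
			demo_reinit(dev);
			goto again;	/* one more attempt, then give up */
		}
		return rc;
	}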
bf46a0df2004e22d58d7ee7494c2d532e178f3a8..e3057bb5ffd840172484bb958ad6ac72190b4e7f 100644 --- a/drivers/clk/ingenic/jz4770-cgu.c +++ b/drivers/clk/ingenic/jz4770-cgu.c @@ -436,8 +436,10 @@ static void __init jz4770_cgu_init(struct device_node *np) cgu = ingenic_cgu_new(jz4770_cgu_clocks, ARRAY_SIZE(jz4770_cgu_clocks), np); - if (!cgu) + if (!cgu) { pr_err("%s: failed to initialise CGU\n", __func__); + return; + } retval = ingenic_cgu_register_clocks(cgu); if (retval) diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index ee693e15d9ebc68f24d0754a34f2a2068ea8d648..f420f0c9687757c19d0e21e5d785817b5dcaeab7 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -105,7 +105,7 @@ static int update_config(struct clk_rcg2 *rcg) } WARN(1, "%s: rcg didn't update its configuration.", name); - return 0; + return -EBUSY; } static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index) diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c index a35579a3f884fbd3fa920e6d30950144c385c3f2..476dab494c44d3d5ea802ea863281fe585ae6edc 100644 --- a/drivers/clk/tegra/clk-tegra-pmc.c +++ b/drivers/clk/tegra/clk-tegra-pmc.c @@ -60,16 +60,16 @@ struct pmc_clk_init_data { static DEFINE_SPINLOCK(clk_out_lock); -static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2", - "clk_m_div4", "extern1", +static const char *clk_out1_parents[] = { "osc", "osc_div2", + "osc_div4", "extern1", }; -static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2", - "clk_m_div4", "extern2", +static const char *clk_out2_parents[] = { "osc", "osc_div2", + "osc_div4", "extern2", }; -static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2", - "clk_m_div4", "extern3", +static const char *clk_out3_parents[] = { "osc", "osc_div2", + "osc_div4", "extern3", }; static struct pmc_clk_init_data pmc_clks[] = { diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 921aca64858d02edae4dc5d010b37cfffddf684c..9566e2cb2bef4db9589ca1ef91f180d7d11f7958 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -948,6 +948,7 @@ u32 arch_timer_get_rate(void) { return arch_timer_rate; } +EXPORT_SYMBOL_GPL(arch_timer_get_rate); bool arch_timer_evtstrm_available(void) { diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index d8c3595e90236e5f9d87ca9b5f55a7cbdb76ccdc..a0cbbdfc77359f0cd34618a4df992145882f0b67 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -310,6 +310,9 @@ static int imx6ul_opp_check_speed_grading(struct device *dev) void __iomem *base; np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp"); + if (!np) + np = of_find_compatible_node(NULL, NULL, + "fsl,imx6ull-ocotp"); if (!np) return -ENOENT; diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 5fff39dae625777b25663f06c5183289ef4cd8c2..687c92ef76440bb96b6ae5c8662f03f72dbdc3b3 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -1081,6 +1081,12 @@ static int init_chip_info(void) static inline void clean_chip_info(void) { + int i; + + /* flush any pending work items */ + if (chips) + for (i = 0; i < nr_chips; i++) + cancel_work_sync(&chips[i].throttle); kfree(chips); } diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 96a3a9bf8b1294a89cb38ebeea323555fac3785b..afb49ff6a09477f07fa1b6a72f5ee4c4dc18e55c 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ 
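
The powernv-cpufreq cleanup above flushes each chip's throttle work before freeing the array that embeds the work items, so no worker can run against freed memory. A minimal sketch of that teardown order; struct demo_chip is an assumption of this example:

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct demo_chip {
		struct work_struct throttle;
		int id;
	};

	static void demo_free_chips(struct demo_chip *chips, int nr_chips)
	{
		int i;

		if (!chips)
			return;

		/* no throttle worker may still be running once the memory is gone */
		for (i = 0; i < nr_chips; i++)
			cancel_work_sync(&chips[i].throttle);

		kfree(chips);
	}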
-28,6 +28,7 @@ DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices); DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev); +EXPORT_SYMBOL_GPL(cpuidle_dev); DEFINE_MUTEX(cpuidle_lock); LIST_HEAD(cpuidle_detected_devices); diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index 9fed1b82929278d08d3aa8ef26c77ebd705fbd1f..e3321b9cf501a439f4691701461f5d5249319589 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c @@ -95,6 +95,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov) return ret; } +EXPORT_SYMBOL_GPL(cpuidle_register_governor); /** * cpuidle_governor_latency_req - Compute a latency constraint for CPU diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index edacf9b39b638ce543a20eec9b0259654c11e088..ceb033930535f976178d2acdcce555bf0fd3b3be 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c @@ -1457,7 +1457,13 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap); */ void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata) { - __be64 sector_size = cpu_to_be64(512); + /* + * Set sector size to a big value, practically disabling + * sector size segmentation in xts implementation. We cannot + * take full advantage of this HW feature with existing + * crypto API / dm-crypt SW architecture. + */ + __be64 sector_size = cpu_to_be64(BIT(15)); u32 *key_jump_cmd; init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); @@ -1509,7 +1515,13 @@ EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap); */ void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata) { - __be64 sector_size = cpu_to_be64(512); + /* + * Set sector size to a big value, practically disabling + * sector size segmentation in xts implementation. We cannot + * take full advantage of this HW feature with existing + * crypto API / dm-crypt SW architecture. + */ + __be64 sector_size = cpu_to_be64(BIT(15)); u32 *key_jump_cmd; init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index aa6b45bc13b983ca382df769d4e0df9b2935d603..57aac15a335f59a46a2b5cceaeb866058a28c8e9 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -731,7 +731,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode, dev_dbg(dev, "ASSOC buffer type DLLI\n"); hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src), - areq->assoclen, NS_BIT); + areq_ctx->assoclen, NS_BIT); set_flow_mode(&desc[idx], flow_mode); if (ctx->auth_mode == DRV_HASH_XCBC_MAC && areq_ctx->cryptlen > 0) @@ -1080,9 +1080,11 @@ static void cc_proc_header_desc(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { + struct aead_req_ctx *areq_ctx = aead_request_ctx(req); unsigned int idx = *seq_size; + /* Hash associated data */ - if (req->assoclen > 0) + if (areq_ctx->assoclen > 0) cc_set_assoc_desc(req, DIN_HASH, desc, &idx); /* Hash IV */ @@ -1310,7 +1312,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx, { struct aead_req_ctx *areq_ctx = aead_request_ctx(req); struct device *dev = drvdata_to_dev(ctx->drvdata); - unsigned int assoclen = req->assoclen; + unsigned int assoclen = areq_ctx->assoclen; unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ? 
(req->cryptlen - ctx->authsize) : req->cryptlen; @@ -1469,7 +1471,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[], idx++; /* process assoc data */ - if (req->assoclen > 0) { + if (req_ctx->assoclen > 0) { cc_set_assoc_desc(req, DIN_HASH, desc, &idx); } else { hw_desc_init(&desc[idx]); @@ -1561,7 +1563,7 @@ static int config_ccm_adata(struct aead_request *req) * NIST Special Publication 800-38C */ *b0 |= (8 * ((m - 2) / 2)); - if (req->assoclen > 0) + if (req_ctx->assoclen > 0) *b0 |= 64; /* Enable bit 6 if Adata exists. */ rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */ @@ -1572,7 +1574,7 @@ static int config_ccm_adata(struct aead_request *req) /* END of "taken from crypto/ccm.c" */ /* l(a) - size of associated data. */ - req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen); + req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen); memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1); req->iv[15] = 1; @@ -1604,7 +1606,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req) memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv, CCM_BLOCK_IV_SIZE); req->iv = areq_ctx->ctr_iv; - req->assoclen -= CCM_BLOCK_IV_SIZE; + areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE; } static void cc_set_ghash_desc(struct aead_request *req, @@ -1812,7 +1814,7 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[], // for gcm and rfc4106. cc_set_ghash_desc(req, desc, seq_size); /* process(ghash) assoc data */ - if (req->assoclen > 0) + if (req_ctx->assoclen > 0) cc_set_assoc_desc(req, DIN_HASH, desc, seq_size); cc_set_gctr_desc(req, desc, seq_size); /* process(gctr+ghash) */ @@ -1836,8 +1838,8 @@ static int config_gcm_context(struct aead_request *req) (req->cryptlen - ctx->authsize); __be32 counter = cpu_to_be32(2); - dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", - __func__, cryptlen, req->assoclen, ctx->authsize); + dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n", + __func__, cryptlen, req_ctx->assoclen, ctx->authsize); memset(req_ctx->hkey, 0, AES_BLOCK_SIZE); @@ -1853,7 +1855,7 @@ static int config_gcm_context(struct aead_request *req) if (!req_ctx->plaintext_authenticate_only) { __be64 temp64; - temp64 = cpu_to_be64(req->assoclen * 8); + temp64 = cpu_to_be64(req_ctx->assoclen * 8); memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64)); temp64 = cpu_to_be64(cryptlen * 8); memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8); @@ -1863,8 +1865,8 @@ static int config_gcm_context(struct aead_request *req) */ __be64 temp64; - temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + - cryptlen) * 8); + temp64 = cpu_to_be64((req_ctx->assoclen + + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8); memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64)); temp64 = 0; memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8); @@ -1884,7 +1886,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req) memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv, GCM_BLOCK_RFC4_IV_SIZE); req->iv = areq_ctx->ctr_iv; - req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE; + areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE; } static int cc_proc_aead(struct aead_request *req, @@ -1909,7 +1911,7 @@ static int cc_proc_aead(struct aead_request *req, /* Check data length according to mode */ if (validate_data_size(ctx, direct, req)) { dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n", - req->cryptlen, req->assoclen); + req->cryptlen, areq_ctx->assoclen); crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN); return 
-EINVAL; } @@ -2058,8 +2060,11 @@ static int cc_aead_encrypt(struct aead_request *req) struct aead_req_ctx *areq_ctx = aead_request_ctx(req); int rc; + memset(areq_ctx, 0, sizeof(*areq_ctx)); + /* No generated IV required */ areq_ctx->backup_iv = req->iv; + areq_ctx->assoclen = req->assoclen; areq_ctx->backup_giv = NULL; areq_ctx->is_gcm4543 = false; @@ -2087,8 +2092,11 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req) goto out; } + memset(areq_ctx, 0, sizeof(*areq_ctx)); + /* No generated IV required */ areq_ctx->backup_iv = req->iv; + areq_ctx->assoclen = req->assoclen; areq_ctx->backup_giv = NULL; areq_ctx->is_gcm4543 = true; @@ -2106,8 +2114,11 @@ static int cc_aead_decrypt(struct aead_request *req) struct aead_req_ctx *areq_ctx = aead_request_ctx(req); int rc; + memset(areq_ctx, 0, sizeof(*areq_ctx)); + /* No generated IV required */ areq_ctx->backup_iv = req->iv; + areq_ctx->assoclen = req->assoclen; areq_ctx->backup_giv = NULL; areq_ctx->is_gcm4543 = false; @@ -2133,8 +2144,11 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req) goto out; } + memset(areq_ctx, 0, sizeof(*areq_ctx)); + /* No generated IV required */ areq_ctx->backup_iv = req->iv; + areq_ctx->assoclen = req->assoclen; areq_ctx->backup_giv = NULL; areq_ctx->is_gcm4543 = true; @@ -2250,8 +2264,11 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req) goto out; } + memset(areq_ctx, 0, sizeof(*areq_ctx)); + /* No generated IV required */ areq_ctx->backup_iv = req->iv; + areq_ctx->assoclen = req->assoclen; areq_ctx->backup_giv = NULL; areq_ctx->plaintext_authenticate_only = false; @@ -2273,11 +2290,14 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req) struct aead_req_ctx *areq_ctx = aead_request_ctx(req); int rc; + memset(areq_ctx, 0, sizeof(*areq_ctx)); + //plaintext is not encryped with rfc4543 areq_ctx->plaintext_authenticate_only = true; /* No generated IV required */ areq_ctx->backup_iv = req->iv; + areq_ctx->assoclen = req->assoclen; areq_ctx->backup_giv = NULL; cc_proc_rfc4_gcm(req); @@ -2305,8 +2325,11 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req) goto out; } + memset(areq_ctx, 0, sizeof(*areq_ctx)); + /* No generated IV required */ areq_ctx->backup_iv = req->iv; + areq_ctx->assoclen = req->assoclen; areq_ctx->backup_giv = NULL; areq_ctx->plaintext_authenticate_only = false; @@ -2328,11 +2351,14 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req) struct aead_req_ctx *areq_ctx = aead_request_ctx(req); int rc; + memset(areq_ctx, 0, sizeof(*areq_ctx)); + //plaintext is not decryped with rfc4543 areq_ctx->plaintext_authenticate_only = true; /* No generated IV required */ areq_ctx->backup_iv = req->iv; + areq_ctx->assoclen = req->assoclen; areq_ctx->backup_giv = NULL; cc_proc_rfc4_gcm(req); diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h index 5edf3b351fa44a785a935389932199e587852b6a..74bc99067f180512ec406690cdd421a0fb632c01 100644 --- a/drivers/crypto/ccree/cc_aead.h +++ b/drivers/crypto/ccree/cc_aead.h @@ -67,6 +67,7 @@ struct aead_req_ctx { u8 backup_mac[MAX_MAC_SIZE]; u8 *backup_iv; /*store iv for generated IV flow*/ u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/ + u32 assoclen; /* internal assoclen */ dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */ /* buffer for internal ccm configurations */ dma_addr_t ccm_iv0_dma_addr; diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index 90b4870078fb724e63de45518c47c6b40d56fd08..630020255941a500a9325e6d6b8263367a72af81 100644 
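
Each ccree entry point above now zeroes its request context and snapshots req->assoclen into it, so the RFC4309/RFC4543 IV stripping adjusts the driver's private copy instead of the caller's request. A reduced sketch of that pattern; struct demo_req_ctx, DEMO_IV_SIZE and the trailing adjustment are illustrative assumptions, not the ccree code:

	#include <crypto/internal/aead.h>
	#include <linux/string.h>

	#define DEMO_IV_SIZE	8	/* assumed IV length prepended to the AAD */

	struct demo_req_ctx {
		u32 assoclen;		/* driver-private copy of req->assoclen */
		u8 *backup_iv;
	};

	static int demo_aead_encrypt(struct aead_request *req)
	{
		struct demo_req_ctx *ctx = aead_request_ctx(req);

		memset(ctx, 0, sizeof(*ctx));	/* no stale state from a previous request */
		ctx->backup_iv = req->iv;
		ctx->assoclen = req->assoclen;	/* later adjustments touch the copy only */

		/* e.g. an rfc4309-style transform strips the IV from the AAD: */
		ctx->assoclen -= DEMO_IV_SIZE;

		/* ... build descriptors from ctx->assoclen, never req->assoclen ... */
		return 0;
	}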
--- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -65,7 +65,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req, { struct aead_req_ctx *areq_ctx = aead_request_ctx(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); - u32 skip = req->assoclen + req->cryptlen; + u32 skip = areq_ctx->assoclen + req->cryptlen; if (areq_ctx->is_gcm4543) skip += crypto_aead_ivsize(tfm); @@ -460,10 +460,8 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, /* Map the src SGL */ rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); - if (rc) { - rc = -ENOMEM; + if (rc) goto cipher_exit; - } if (mapped_nents > 1) req_ctx->dma_buf_type = CC_DMA_BUF_MLLI; @@ -477,12 +475,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, } } else { /* Map the dst sg */ - if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL, - &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, - &dummy, &mapped_nents)) { - rc = -ENOMEM; + rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL, + &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, + &dummy, &mapped_nents); + if (rc) goto cipher_exit; - } if (mapped_nents > 1) req_ctx->dma_buf_type = CC_DMA_BUF_MLLI; @@ -577,8 +574,8 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, - req->assoclen, req->cryptlen); - size_to_unmap = req->assoclen + req->cryptlen; + areq_ctx->assoclen, req->cryptlen); + size_to_unmap = areq_ctx->assoclen + req->cryptlen; if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) size_to_unmap += areq_ctx->req_authsize; if (areq_ctx->is_gcm4543) @@ -720,7 +717,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata, struct scatterlist *current_sg = req->src; struct crypto_aead *tfm = crypto_aead_reqtfm(req); unsigned int sg_index = 0; - u32 size_of_assoc = req->assoclen; + u32 size_of_assoc = areq_ctx->assoclen; struct device *dev = drvdata_to_dev(drvdata); if (areq_ctx->is_gcm4543) @@ -731,7 +728,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata, goto chain_assoc_exit; } - if (req->assoclen == 0) { + if (areq_ctx->assoclen == 0) { areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL; areq_ctx->assoc.nents = 0; areq_ctx->assoc.mlli_nents = 0; @@ -791,7 +788,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata, cc_dma_buf_type(areq_ctx->assoc_buff_type), areq_ctx->assoc.nents); cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src, - req->assoclen, 0, is_last, + areq_ctx->assoclen, 0, is_last, &areq_ctx->assoc.mlli_nents); areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; } @@ -975,11 +972,11 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, u32 src_mapped_nents = 0, dst_mapped_nents = 0; u32 offset = 0; /* non-inplace mode */ - unsigned int size_for_map = req->assoclen + req->cryptlen; + unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen; struct crypto_aead *tfm = crypto_aead_reqtfm(req); u32 sg_index = 0; bool is_gcm4543 = areq_ctx->is_gcm4543; - u32 size_to_skip = req->assoclen; + u32 size_to_skip = areq_ctx->assoclen; if (is_gcm4543) size_to_skip += crypto_aead_ivsize(tfm); @@ -1023,9 +1020,13 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, areq_ctx->src_offset = offset; if (req->src != req->dst) { - size_for_map = req->assoclen + 
req->cryptlen; - size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? - authsize : 0; + size_for_map = areq_ctx->assoclen + req->cryptlen; + + if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) + size_for_map += authsize; + else + size_for_map -= authsize; + if (is_gcm4543) size_for_map += crypto_aead_ivsize(tfm); @@ -1033,10 +1034,8 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, &areq_ctx->dst.nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, &dst_mapped_nents); - if (rc) { - rc = -ENOMEM; + if (rc) goto chain_data_exit; - } } dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map, @@ -1190,11 +1189,10 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) } areq_ctx->ccm_iv0_dma_addr = dma_addr; - if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config, - &sg_data, req->assoclen)) { - rc = -ENOMEM; + rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config, + &sg_data, areq_ctx->assoclen); + if (rc) goto aead_map_failure; - } } if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { @@ -1243,10 +1241,12 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) areq_ctx->gcm_iv_inc2_dma_addr = dma_addr; } - size_to_map = req->cryptlen + req->assoclen; - if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) + size_to_map = req->cryptlen + areq_ctx->assoclen; + /* If we do in-place encryption, we also need the auth tag */ + if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) && + (req->src == req->dst)) { size_to_map += authsize; - + } if (is_gcm4543) size_to_map += crypto_aead_ivsize(tfm); rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL, @@ -1254,10 +1254,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES), &dummy, &mapped_nents); - if (rc) { - rc = -ENOMEM; + if (rc) goto aead_map_failure; - } if (areq_ctx->is_single_pass) { /* @@ -1341,6 +1339,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, struct mlli_params *mlli_params = &areq_ctx->mlli_params; struct buffer_array sg_data; struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle; + int rc = 0; u32 dummy = 0; u32 mapped_nents = 0; @@ -1360,18 +1359,18 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, /*TODO: copy data in case that buffer is enough for operation */ /* map the previous buffer */ if (*curr_buff_cnt) { - if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, - &sg_data)) { - return -ENOMEM; - } + rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, + &sg_data); + if (rc) + return rc; } if (src && nbytes > 0 && do_update) { - if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE, - &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, - &dummy, &mapped_nents)) { + rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE, + &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, + &dummy, &mapped_nents); + if (rc) goto unmap_curr_buff; - } if (src && mapped_nents == 1 && areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { memcpy(areq_ctx->buff_sg, src, @@ -1390,7 +1389,8 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, /* add the src data to the sg_data */ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes, 0, true, &areq_ctx->mlli_nents); - if (cc_generate_mlli(dev, &sg_data, mlli_params, flags)) + rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags); + if (rc) goto fail_unmap_din; } /* change the buffer index for the unmap function */ @@ 
-1406,7 +1406,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, if (*curr_buff_cnt) dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); - return -ENOMEM; + return rc; } int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, @@ -1425,6 +1425,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, struct buffer_array sg_data; struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle; unsigned int swap_index = 0; + int rc = 0; u32 dummy = 0; u32 mapped_nents = 0; @@ -1469,21 +1470,21 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, } if (*curr_buff_cnt) { - if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, - &sg_data)) { - return -ENOMEM; - } + rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, + &sg_data); + if (rc) + return rc; /* change the buffer index for next operation */ swap_index = 1; } if (update_data_len > *curr_buff_cnt) { - if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt), - DMA_TO_DEVICE, &areq_ctx->in_nents, - LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, - &mapped_nents)) { + rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt), + DMA_TO_DEVICE, &areq_ctx->in_nents, + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, + &mapped_nents); + if (rc) goto unmap_curr_buff; - } if (mapped_nents == 1 && areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { /* only one entry in the SG and no previous data */ @@ -1503,7 +1504,8 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, (update_data_len - *curr_buff_cnt), 0, true, &areq_ctx->mlli_nents); - if (cc_generate_mlli(dev, &sg_data, mlli_params, flags)) + rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags); + if (rc) goto fail_unmap_din; } areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index); @@ -1517,7 +1519,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, if (*curr_buff_cnt) dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); - return -ENOMEM; + return rc; } void cc_unmap_hash_request(struct device *dev, void *ctx, diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index b926098f70ffdb213bbfb1267cf6e473cfd880b4..b0c592073a4a3e99d8b8e1d7188a9ee6101d6009 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -25,6 +25,7 @@ #include #include #include +#include #define DCP_MAX_CHANS 4 #define DCP_BUF_SZ PAGE_SIZE @@ -36,11 +37,11 @@ * Null hashes to align with hw behavior on imx6sl and ull * these are flipped for consistency with hw output */ -const uint8_t sha1_null_hash[] = +static const uint8_t sha1_null_hash[] = "\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf" "\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda"; -const uint8_t sha256_null_hash[] = +static const uint8_t sha256_null_hash[] = "\x55\xb8\x52\x78\x1b\x99\x95\xa4" "\x4c\x93\x9b\x64\xe4\x41\xae\x27" "\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a" @@ -621,49 +622,46 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq) struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm); struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); struct hash_alg_common *halg = crypto_hash_alg_common(tfm); - const int nents = sg_nents(req->src); uint8_t *in_buf = sdcp->coh->sha_in_buf; uint8_t *out_buf = sdcp->coh->sha_out_buf; - uint8_t *src_buf; - struct scatterlist *src; - unsigned int i, len, clen; + unsigned int i, len, clen, oft = 0; int ret; int fin = rctx->fini; if (fin) rctx->fini = 0; - for_each_sg(req->src, src, nents, i) { - src_buf = sg_virt(src); - len = 
sg_dma_len(src); - - do { - if (actx->fill + len > DCP_BUF_SZ) - clen = DCP_BUF_SZ - actx->fill; - else - clen = len; - - memcpy(in_buf + actx->fill, src_buf, clen); - len -= clen; - src_buf += clen; - actx->fill += clen; + src = req->src; + len = req->nbytes; - /* - * If we filled the buffer and still have some - * more data, submit the buffer. - */ - if (len && actx->fill == DCP_BUF_SZ) { - ret = mxs_dcp_run_sha(req); - if (ret) - return ret; - actx->fill = 0; - rctx->init = 0; - } - } while (len); + while (len) { + if (actx->fill + len > DCP_BUF_SZ) + clen = DCP_BUF_SZ - actx->fill; + else + clen = len; + + scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen, + 0); + + len -= clen; + oft += clen; + actx->fill += clen; + + /* + * If we filled the buffer and still have some + * more data, submit the buffer. + */ + if (len && actx->fill == DCP_BUF_SZ) { + ret = mxs_dcp_run_sha(req); + if (ret) + return ret; + actx->fill = 0; + rctx->init = 0; + } } if (fin) { diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 95f5712db6db1cbd3a234bcd750e658239cf0c2d..d6a04e1f6e5709f5cf2571edc2e7cdf6c7ce4c0b 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -543,8 +543,6 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type, mutex_lock(&devfreq->lock); devfreq->scaling_min_freq = find_available_min_freq(devfreq); - if (!devfreq->scaling_min_freq) - goto out; devfreq->scaling_max_freq = find_available_max_freq(devfreq); if (!devfreq->scaling_max_freq) { @@ -648,11 +646,6 @@ struct devfreq *devfreq_add_device(struct device *dev, } devfreq->scaling_min_freq = find_available_min_freq(devfreq); - if (!devfreq->scaling_min_freq) { - mutex_unlock(&devfreq->lock); - err = -EINVAL; - goto err_dev; - } devfreq->min_freq = devfreq->scaling_min_freq; devfreq->scaling_max_freq = find_available_max_freq(devfreq); @@ -1055,7 +1048,7 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr, struct devfreq *df = to_devfreq(dev); int ret; char str_governor[DEVFREQ_NAME_LEN + 1]; - struct devfreq_governor *governor; + const struct devfreq_governor *governor, *prev_gov; ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor); if (ret != 1) @@ -1089,12 +1082,21 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr, goto gov_stop_out; } } + prev_gov = df->governor; df->governor = governor; strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN); ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL); - if (ret) + if (ret) { dev_warn(dev, "%s: Governor %s not started(%d)\n", __func__, df->governor->name, ret); + if (prev_gov) { + df->governor = prev_gov; + strlcpy(df->governor_name, prev_gov->name, + DEVFREQ_NAME_LEN); + df->governor->event_handler(df, DEVFREQ_GOV_START, + NULL); + } + } gov_stop_out: mutex_unlock(&df->event_lock); diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c index 28e0f2de7100ee0cee83b1e1179c750694b942af..532801b86a8f6de60b65984251e15f6bb6e67aeb 100644 --- a/drivers/devfreq/governor_simpleondemand.c +++ b/drivers/devfreq/governor_simpleondemand.c @@ -28,6 +28,7 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL; struct devfreq_simple_ondemand_data *data = df->data; unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX; + unsigned long min = (df->min_freq) ? 
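
The mxs-dcp hash path above switches from hand-walking sg_virt()/sg_dma_len() to scatterwalk_map_and_copy(), which copes with arbitrary scatterlist layouts. A small sketch of chunking an SG payload through a bounce buffer that way; demo_submit() is a hypothetical consumer of each filled chunk:

	#include <crypto/scatterwalk.h>
	#include <linux/scatterlist.h>
	#include <linux/kernel.h>
	#include <linux/types.h>

	static int demo_copy_sg_in_chunks(struct scatterlist *src, unsigned int nbytes,
					  u8 *buf, unsigned int bufsz,
					  int (*demo_submit)(u8 *buf, unsigned int len))
	{
		unsigned int oft = 0, clen;
		int ret;

		while (nbytes) {
			clen = min(nbytes, bufsz);

			/* direction 0: copy from the scatterlist into the linear buffer */
			scatterwalk_map_and_copy(buf, src, oft, clen, 0);

			ret = demo_submit(buf, clen);
			if (ret)
				return ret;

			oft += clen;
			nbytes -= clen;
		}

		return 0;
	}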
df->min_freq : 0; err = devfreq_update_stats(df); if (err) @@ -45,18 +46,30 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, dfso_upthreshold < dfso_downdifferential) return -EINVAL; - /* Assume MAX if it is going to be divided by zero */ - if (stat->total_time == 0) { - *freq = max; - return 0; - } - /* Prevent overflow */ if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) { stat->busy_time >>= 7; stat->total_time >>= 7; } + if (data && data->simple_scaling) { + if (stat->busy_time * 100 > + stat->total_time * dfso_upthreshold) + *freq = max; + else if (stat->busy_time * 100 < + stat->total_time * dfso_downdifferential) + *freq = min; + else + *freq = df->previous_freq; + return 0; + } + + /* Assume MAX if it is going to be divided by zero */ + if (stat->total_time == 0) { + *freq = max; + return 0; + } + /* Set MAX if it's busy enough */ if (stat->busy_time * 100 > stat->total_time * dfso_upthreshold) { diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 55efd0affe93d686b250c155bcbdcca423abfea4..033c3fad7a235b49356c9939cde32305532cc9a2 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -34,6 +34,9 @@ #include #include #include +#include +#include +#include #include #include @@ -1240,6 +1243,18 @@ int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags) } EXPORT_SYMBOL_GPL(dma_buf_get_flags); +int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid) +{ + if (WARN_ON(!dmabuf) || !uuid) + return -EINVAL; + + if (!dmabuf->ops->get_uuid) + return -ENODEV; + + return dmabuf->ops->get_uuid(dmabuf, uuid); +} +EXPORT_SYMBOL_GPL(dma_buf_get_uuid); + #ifdef CONFIG_DEBUG_FS static int dma_buf_debug_show(struct seq_file *s, void *unused) { diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 7b7fba0c92532fe32258b6f039709b82355c82cb..e38a653dd208f6b1ebdb6341867e64fc6e05f5a1 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -567,8 +567,8 @@ static int dmatest_func(void *data) flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; ktime = ktime_get(); - while (!kthread_should_stop() - && !(params->iterations && total_tests >= params->iterations)) { + while (!(kthread_should_stop() || + (params->iterations && total_tests >= params->iterations))) { struct dma_async_tx_descriptor *tx = NULL; struct dmaengine_unmap_data *um; dma_addr_t *dsts; diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c index a983708b77a66a91f945cce32ac49eebbbfdd913..363b403bdb51823b1ba3da6b28a01fee5dd1ac71 100644 --- a/drivers/extcon/extcon-axp288.c +++ b/drivers/extcon/extcon-axp288.c @@ -428,9 +428,40 @@ static int axp288_extcon_probe(struct platform_device *pdev) /* Start charger cable type detection */ axp288_extcon_enable(info); + device_init_wakeup(dev, true); + platform_set_drvdata(pdev, info); + + return 0; +} + +static int __maybe_unused axp288_extcon_suspend(struct device *dev) +{ + struct axp288_extcon_info *info = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + enable_irq_wake(info->irq[VBUS_RISING_IRQ]); + return 0; } +static int __maybe_unused axp288_extcon_resume(struct device *dev) +{ + struct axp288_extcon_info *info = dev_get_drvdata(dev); + + /* + * Wakeup when a charger is connected to do charger-type + * connection and generate an extcon event which makes the + * axp288 charger driver set the input current limit. 
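
The simple_scaling branch above picks a target frequency purely from the busy/total ratio with an upper threshold and a lower differential. A compact sketch of that decision; the function and parameter names are illustrative, not the governor's API:

	#include <linux/types.h>

	static unsigned long demo_pick_freq(u64 busy, u64 total,
					    unsigned long cur_freq,
					    unsigned long min_freq,
					    unsigned long max_freq,
					    unsigned int upthreshold,
					    unsigned int downdifferential)
	{
		if (!total)
			return max_freq;	/* no stats yet: assume fully busy */

		if (busy * 100 > total * upthreshold)
			return max_freq;	/* busy enough: run flat out */

		if (busy * 100 < total * downdifferential)
			return min_freq;	/* mostly idle: drop to the floor */

		return cur_freq;		/* inside the hysteresis band: hold */
	}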
+ */ + if (device_may_wakeup(dev)) + disable_irq_wake(info->irq[VBUS_RISING_IRQ]); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(axp288_extcon_pm_ops, axp288_extcon_suspend, + axp288_extcon_resume); + static const struct platform_device_id axp288_extcon_table[] = { { .name = "axp288_extcon" }, {}, @@ -442,6 +473,7 @@ static struct platform_driver axp288_extcon_driver = { .id_table = axp288_extcon_table, .driver = { .name = "axp288_extcon", + .pm = &axp288_extcon_pm_ops, }, }; diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 8cbb2ca8340f98cdad6d276de8da86dbcab514e1..9d603b66818ccc87cb23cf0e62f052f288a822ea 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -487,6 +487,21 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id) } EXPORT_SYMBOL_GPL(extcon_sync); +int extcon_blocking_sync(struct extcon_dev *edev, unsigned int id, bool val) +{ + int index; + + if (!edev) + return -EINVAL; + + index = find_cable_index_by_id(edev, id); + if (index < 0) + return index; + + return blocking_notifier_call_chain(&edev->bnh[index], val, edev); +} +EXPORT_SYMBOL(extcon_blocking_sync); + /** * extcon_get_state() - Get the state of an external connector. * @edev: the extcon device @@ -941,6 +956,22 @@ int extcon_register_blocking_notifier(struct extcon_dev *edev, unsigned int id, } EXPORT_SYMBOL(extcon_register_blocking_notifier); +int extcon_unregister_blocking_notifier(struct extcon_dev *edev, + unsigned int id, struct notifier_block *nb) +{ + int idx; + + if (!edev || !nb) + return -EINVAL; + + idx = find_cable_index_by_id(edev, id); + if (idx < 0) + return idx; + + return blocking_notifier_chain_unregister(&edev->bnh[idx], nb); +} +EXPORT_SYMBOL(extcon_unregister_blocking_notifier); + /** * extcon_unregister_notifier() - Unregister a notifier block from the extcon. 
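
The axp288 hunk above arms its VBUS-rising interrupt as a wake source only when the device is allowed to wake the system, and disarms it symmetrically on resume. A generic sketch of that suspend/resume pair; struct demo_info and demo_pm_ops are illustrative names:

	#include <linux/device.h>
	#include <linux/interrupt.h>
	#include <linux/pm.h>
	#include <linux/pm_wakeup.h>

	struct demo_info {
		int wake_irq;
	};

	static int __maybe_unused demo_suspend(struct device *dev)
	{
		struct demo_info *info = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			enable_irq_wake(info->wake_irq);	/* keep IRQ armed across suspend */

		return 0;
	}

	static int __maybe_unused demo_resume(struct device *dev)
	{
		struct demo_info *info = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			disable_irq_wake(info->wake_irq);

		return 0;
	}

	/* wired into the platform driver via .driver.pm = &demo_pm_ops */
	static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);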
* @edev: the extcon device @@ -1275,6 +1306,13 @@ int extcon_dev_register(struct extcon_dev *edev) goto err_dev; } + edev->bnh = devm_kzalloc(&edev->dev, + sizeof(*edev->bnh) * edev->max_supported, GFP_KERNEL); + if (!edev->bnh) { + ret = -ENOMEM; + goto err_dev; + } + for (index = 0; index < edev->max_supported; index++) RAW_INIT_NOTIFIER_HEAD(&edev->nh[index]); diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index c64c7da738297453ee05f173c1a6c0d4c0fadbe9..05b528c7ed8fdabecc46fcf15a10c201c40c8450 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -489,11 +489,6 @@ static int _sdei_event_unregister(struct sdei_event *event) { lockdep_assert_held(&sdei_events_lock); - spin_lock(&sdei_list_lock); - event->reregister = false; - event->reenable = false; - spin_unlock(&sdei_list_lock); - if (event->type == SDEI_EVENT_TYPE_SHARED) return sdei_api_event_unregister(event->event_num); @@ -516,6 +511,11 @@ int sdei_event_unregister(u32 event_num) break; } + spin_lock(&sdei_list_lock); + event->reregister = false; + event->reenable = false; + spin_unlock(&sdei_list_lock); + err = _sdei_event_unregister(event); if (err) break; @@ -583,26 +583,15 @@ static int _sdei_event_register(struct sdei_event *event) lockdep_assert_held(&sdei_events_lock); - spin_lock(&sdei_list_lock); - event->reregister = true; - spin_unlock(&sdei_list_lock); - if (event->type == SDEI_EVENT_TYPE_SHARED) return sdei_api_event_register(event->event_num, sdei_entry_point, event->registered, SDEI_EVENT_REGISTER_RM_ANY, 0); - err = sdei_do_cross_call(_local_event_register, event); - if (err) { - spin_lock(&sdei_list_lock); - event->reregister = false; - event->reenable = false; - spin_unlock(&sdei_list_lock); - + if (err) sdei_do_cross_call(_local_event_unregister, event); - } return err; } @@ -630,8 +619,17 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg) break; } + spin_lock(&sdei_list_lock); + event->reregister = true; + spin_unlock(&sdei_list_lock); + err = _sdei_event_register(event); if (err) { + spin_lock(&sdei_list_lock); + event->reregister = false; + event->reenable = false; + spin_unlock(&sdei_list_lock); + sdei_event_destroy(event); pr_warn("Failed to register event %u: %d\n", event_num, err); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index d54fca902e64f8ba565d6540f4a51cc511370a94..f1e0a27152691e7059f422e94740196f77cf4ffe 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -572,7 +572,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, } } - if (efi_enabled(EFI_MEMMAP)) + if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP)) efi_memattr_init(); efi_tpm_eventlog_init(); diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index c80ec1d03274413863fb8e3f917f26daa8f868c2..0863996e38df8322c240221fa715982b91415868 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -397,29 +397,26 @@ int psci_cpu_init_idle(unsigned int cpu) return ret; } -static int psci_suspend_finisher(unsigned long index) +static int psci_suspend_finisher(unsigned long state_id) { - u32 *state = __this_cpu_read(psci_power_state); - - return psci_ops.cpu_suspend(state[index - 1], + return psci_ops.cpu_suspend(state_id, __pa_symbol(cpu_resume)); } - -int psci_cpu_suspend_enter(unsigned long index) +int psci_cpu_suspend_enter(unsigned long state_id) { int ret; - u32 *state = __this_cpu_read(psci_power_state); + /* * idle state index 0 corresponds to wfi, should never be 
called * from the cpu_suspend operations */ - if (WARN_ON_ONCE(!index)) + if (WARN_ON_ONCE(!state_id)) return -EINVAL; - if (!psci_power_state_loses_context(state[index - 1])) - ret = psci_ops.cpu_suspend(state[index - 1], 0); + if (!psci_power_state_loses_context(state_id)) + ret = psci_ops.cpu_suspend(state_id, 0); else - ret = cpu_suspend(index, psci_suspend_finisher); + ret = cpu_suspend(state_id, psci_suspend_finisher); return ret; } diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile index e9ed439a5b65329c33f450a1c82e78290581783a..99251b5b3be3e6df5934f2e20cf9e93a32309c3c 100644 --- a/drivers/gpu/Makefile +++ b/drivers/gpu/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_TEGRA_HOST1X) += host1x/ obj-y += drm/ vga/ obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/ +obj-$(CONFIG_TRACE_GPU_MEM) += trace/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index c3df75a9f65d98f213e24aa5e63261b36636de2e..e63a253eb42550e548df03c6d819d27bfe191d03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -71,7 +71,8 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) adev->pm.ac_power = true; else adev->pm.ac_power = false; - if (adev->powerplay.pp_funcs->enable_bapm) + if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->enable_bapm) amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); mutex_unlock(&adev->pm.mutex); } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 4f8f3bb2183200c0c435d31486a861cbad1b3ceb..a54f8943ffa3431fbde743b73190d8ac3fb84d15 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -857,7 +857,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle, if (enable) { /* wait for STATUS to clear */ - if (vcn_v1_0_is_idle(handle)) + if (!vcn_v1_0_is_idle(handle)) return -EBUSY; vcn_v1_0_enable_clock_gating(adev); } else { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 938d0053a820810de353cb07d1411b730c533a22..28022d1cb0f07cb7e16f8e0232207aa18eb25b2f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -921,9 +921,9 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, return 0; kfd_gtt_no_free_chunk: - pr_debug("Allocation failed with mem_obj = %p\n", mem_obj); + pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj); mutex_unlock(&kfd->gtt_sa_lock); - kfree(mem_obj); + kfree(*mem_obj); return -ENOMEM; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 2b2efe443c36d27b70b4591193efe2a19648378f..b64ad9e1f0c385fef94692024c9cc407aff9e356 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -996,6 +996,26 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context) return (result == DC_OK); } +static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) +{ + int i; + struct pipe_ctx *pipe; + + for (i = 0; i < MAX_PIPES; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + + if (!pipe->plane_state) + continue; + + /* Must set to false to start with, due to OR in update function */ + pipe->plane_state->status.is_flip_pending = false; + dc->hwss.update_pending_status(pipe); + if (pipe->plane_state->status.is_flip_pending) + return true; + } + return false; +} + bool dc_post_update_surfaces_to_stream(struct dc *dc) { int i; @@ -1003,6 +1023,9 @@ bool 
dc_post_update_surfaces_to_stream(struct dc *dc) post_surface_trace(dc); + if (is_flip_pending_in_pipes(dc, context)) + return true; + for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].stream == NULL || context->res_ctx.pipe_ctx[i].plane_state == NULL) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 122249da03ab7f55707a155fe42d8ac93b9d7ae0..a4928854a3de55beaa495e9de8b62ff641f253f5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2440,6 +2440,17 @@ static bool retrieve_link_cap(struct dc_link *link) sink_id.ieee_device_id, sizeof(sink_id.ieee_device_id)); + /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */ + { + uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 }; + + if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && + !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017, + sizeof(str_mbp_2017))) { + link->reported_link_cap.link_rate = 0x0c; + } + } + core_link_read_dpcd( link, DP_SINK_HW_REVISION_START, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 925e17104f9092cd1f343591a524a906c20d1b80..b9e08b06ed5dbbdc180b5730b745251919540352 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -983,6 +983,32 @@ static int init_thermal_controller( struct pp_hwmgr *hwmgr, const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) { + hwmgr->thermal_controller.ucType = + powerplay_table->sThermalController.ucType; + hwmgr->thermal_controller.ucI2cLine = + powerplay_table->sThermalController.ucI2cLine; + hwmgr->thermal_controller.ucI2cAddress = + powerplay_table->sThermalController.ucI2cAddress; + + hwmgr->thermal_controller.fanInfo.bNoFan = + (0 != (powerplay_table->sThermalController.ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN)); + + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution = + powerplay_table->sThermalController.ucFanParameters & + ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; + + hwmgr->thermal_controller.fanInfo.ulMinRPM + = powerplay_table->sThermalController.ucFanMinRPM * 100UL; + hwmgr->thermal_controller.fanInfo.ulMaxRPM + = powerplay_table->sThermalController.ucFanMaxRPM * 100UL; + + set_hw_cap(hwmgr, + ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType, + PHM_PlatformCaps_ThermalController); + + hwmgr->thermal_controller.use_hw_fan_control = 1; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 6bf032e81e39fd9610e6a612e5e81b397634da39..219440bebd05236439a3fae831cc2f361048e0b5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3788,9 +3788,12 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr, { uint32_t i; + /* force the trim if mclk_switching is disabled to prevent flicker */ + bool force_trim = (low_limit == high_limit); for (i = 0; i < dpm_table->count; i++) { /*skip the trim if od is enabled*/ - if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit + if ((!hwmgr->od_enabled || force_trim) + && (dpm_table->dpm_levels[i].value < low_limit || dpm_table->dpm_levels[i].value > high_limit)) dpm_table->dpm_levels[i].enabled = false; else diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c 
index a39b0343c197dc8a6aea00aa244b80dae1fde232..401c218567af9538f3f5416f9daa1850e7e202d6 100644 --- a/drivers/gpu/drm/bochs/bochs_hw.c +++ b/drivers/gpu/drm/bochs/bochs_hw.c @@ -97,10 +97,8 @@ int bochs_hw_init(struct drm_device *dev, uint32_t flags) size = min(size, mem); } - if (pci_request_region(pdev, 0, "bochs-drm") != 0) { - DRM_ERROR("Cannot request framebuffer\n"); - return -EBUSY; - } + if (pci_request_region(pdev, 0, "bochs-drm") != 0) + DRM_WARN("Cannot request framebuffer, boot fb still active?\n"); bochs->fb_map = ioremap(addr, size); if (bochs->fb_map == NULL) { diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 1a4b44923aeca423ccf7067fec6022abdc6126ff..cb3cc5a2d2ef37a065cf31190ac2929dd76bc108 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1410,6 +1410,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector, return -EINVAL; } state->content_protection = val; + } else if (property == connector->colorspace_property) { + state->colorspace = val; } else if (property == config->writeback_fb_id_property) { struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val); int ret = drm_atomic_set_writeback_fb_for_connector(state, fb); @@ -1507,6 +1509,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector, *val = state->picture_aspect_ratio; } else if (property == config->content_type_property) { *val = state->content_type; + } else if (property == connector->colorspace_property) { + *val = state->colorspace; } else if (property == connector->scaling_mode_property) { *val = state->scaling_mode; } else if (property == connector->content_protection_property) { diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index c22062cc99923f8a04202de3ab879908a324be35..e2350dc56372da6cbb62f0ede3de53ebb70daba3 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2818,6 +2818,7 @@ int __drm_atomic_helper_disable_plane(struct drm_plane *plane, return 0; } +EXPORT_SYMBOL_GPL(__drm_atomic_helper_disable_plane); static int update_output_state(struct drm_atomic_state *state, struct drm_mode_set *set) @@ -3003,6 +3004,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, return 0; } +EXPORT_SYMBOL_GPL(__drm_atomic_helper_set_config); static int __drm_atomic_helper_disable_all(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx, diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 486b784e4dd637b480e295bf02a5553557256340..42111d7bb18e8e0c6543bdeb9494339fc62a8eda 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -822,6 +822,55 @@ static struct drm_prop_enum_list drm_cp_enum_list[] = { }; DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list) +static const struct drm_prop_enum_list hdmi_colorspaces[] = { + /* For Default case, driver will set the colorspace */ + { DRM_MODE_COLORIMETRY_DEFAULT, "Default" }, + /* Standard Definition Colorimetry based on CEA 861 */ + { DRM_MODE_COLORIMETRY_SMPTE_170M_YCC, "SMPTE_170M_YCC" }, + { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" }, + /* Standard Definition Colorimetry based on IEC 61966-2-4 */ + { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" }, + /* High Definition Colorimetry based on IEC 61966-2-4 */ + { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" }, + /* Colorimetry based on IEC 61966-2-1/Amendment 1 */ + { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" }, + /* Colorimetry based on IEC 
61966-2-5 [33] */ + { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" }, + /* Colorimetry based on IEC 61966-2-5 */ + { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" }, + /* Added as part of Additional Colorimetry Extension in 861.G */ + { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" }, + { DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" }, +}; + +static const struct drm_prop_enum_list dp_colorspaces[] = { + /* For Default case, driver will set the colorspace */ + { DRM_MODE_COLORIMETRY_DEFAULT, "Default" }, + /* Standard Definition Colorimetry based on IEC 61966-2-4 */ + { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" }, + /* High Definition Colorimetry based on IEC 61966-2-4 */ + { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" }, + /* Colorimetry based on IEC 61966-2-5 */ + { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" }, + { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" }, + /* DP MSA Colorimetry */ + { DRM_MODE_DP_COLORIMETRY_BT601_YCC, "YCBCR_ITU_601" }, + { DRM_MODE_DP_COLORIMETRY_BT709_YCC, "YCBCR_ITU_709" }, + { DRM_MODE_DP_COLORIMETRY_SRGB, "sRGB" }, + { DRM_MODE_DP_COLORIMETRY_RGB_WIDE_GAMUT, "RGB Wide Gamut" }, + { DRM_MODE_DP_COLORIMETRY_SCRGB, "scRGB" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" }, +}; + /** * DOC: standard connector properties * @@ -1390,6 +1439,65 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev) } EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); +/** + * DOC: standard connector properties + * + * Colorspace: + * drm_mode_create_colorspace_property - create colorspace property + * This property helps select a suitable colorspace based on the sink + * capability. Modern sink devices support wider gamut like BT2020. + * This helps switch to BT2020 mode if the BT2020 encoded video stream + * is being played by the user, same for any other colorspace. Thereby + * giving a good visual experience to users. + * + * The expectation from userspace is that it should parse the EDID + * and get supported colorspaces. Use this property and switch to the + * one supported. Sink supported colorspaces should be retrieved by + * userspace from EDID and driver will not explicitly expose them. + * + * Basically the expectation from userspace is: + * - Set up CRTC DEGAMMA/CTM/GAMMA to convert to some sink + * colorspace + * - Set this new property to let the sink know what it + * converted the CRTC output to. + * - This property is just to inform sink what colorspace + * source is trying to drive. + * + * Called by a driver the first time it's needed, must be attached to desired + * connectors. 
+ */ +int drm_mode_create_colorspace_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_property *prop; + + if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) { + prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, + "Colorspace", + hdmi_colorspaces, + ARRAY_SIZE(hdmi_colorspaces)); + if (!prop) + return -ENOMEM; + } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { + prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, + "Colorspace", dp_colorspaces, + ARRAY_SIZE(dp_colorspaces)); + + if (!prop) + return -ENOMEM; + } else { + DRM_DEBUG_KMS("Colorspace property not supported\n"); + return 0; + } + + connector->colorspace_property = prop; + + return 0; +} +EXPORT_SYMBOL(drm_mode_create_colorspace_property); + /** * drm_mode_create_content_type_property - create content type property * @dev: DRM device diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index fc978603fc94360ba89522b0ea16052f8ec5e915..162c07c79d8d58b55c6ca919052259bcdc8d78db 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -51,10 +51,6 @@ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, int id, struct drm_dp_payload *payload); -static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, - int offset, int size, u8 *bytes); - static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb); static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, @@ -439,6 +435,7 @@ static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx if (idx > raw->curlen) goto fail_len; repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx]; + idx++; if (idx > raw->curlen) goto fail_len; @@ -487,6 +484,7 @@ static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband { int idx = 1; repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf; + repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1; idx++; if (idx > raw->curlen) goto fail_len; @@ -1402,7 +1400,6 @@ static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, return false; } -#if 0 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes) { struct drm_dp_sideband_msg_req_body req; @@ -1415,7 +1412,6 @@ static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 return 0; } -#endif static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, u8 *msg, int len) @@ -1679,6 +1675,7 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number, txmsg->reply.u.path_resources.avail_payload_bw_number); port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number; + port->fec_capable = txmsg->reply.u.path_resources.fec_capable; } } @@ -1807,6 +1804,42 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, } EXPORT_SYMBOL(drm_dp_send_power_updown_phy); +int drm_dp_mst_get_dsc_info(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_mst_dsc_info *dsc_info) +{ + if (!dsc_info) + return -EINVAL; + + port = 
drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return -EINVAL; + + memcpy(dsc_info, &port->dsc_info, sizeof(struct drm_dp_mst_dsc_info)); + drm_dp_put_port(port); + + return 0; +} +EXPORT_SYMBOL_GPL(drm_dp_mst_get_dsc_info); + +int drm_dp_mst_update_dsc_info(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_mst_dsc_info *dsc_info) +{ + if (!dsc_info) + return -EINVAL; + + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return -EINVAL; + + memcpy(&port->dsc_info, dsc_info, sizeof(struct drm_dp_mst_dsc_info)); + drm_dp_put_port(port); + + return 0; +} +EXPORT_SYMBOL_GPL(drm_dp_mst_update_dsc_info); + static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr, int id, struct drm_dp_payload *payload) @@ -1985,10 +2018,9 @@ int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr) } EXPORT_SYMBOL(drm_dp_update_payload_part2); -#if 0 /* unused as of yet */ -static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, - int offset, int size) +int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int offset, int size, u8 *bytes) { int len; struct drm_dp_sideband_msg_tx *txmsg; @@ -1997,18 +2029,18 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, if (!txmsg) return -ENOMEM; - len = build_dpcd_read(txmsg, port->port_num, 0, 8); + len = build_dpcd_read(txmsg, port->port_num, offset, size); txmsg->dst = port->parent; drm_dp_queue_down_tx(mgr, txmsg); return 0; } -#endif +EXPORT_SYMBOL_GPL(drm_dp_send_dpcd_read); -static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, - int offset, int size, u8 *bytes) +int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int offset, int size, u8 *bytes) { int len; int ret; @@ -2042,6 +2074,22 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, drm_dp_put_mst_branch_device(mstb); return ret; } +EXPORT_SYMBOL_GPL(drm_dp_send_dpcd_write); + +int drm_dp_mst_get_max_sdp_streams_supported( + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ + int ret = -1; + + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return ret; + ret = port->num_sdp_streams; + drm_dp_put_port(port); + return ret; +} +EXPORT_SYMBOL_GPL(drm_dp_mst_get_max_sdp_streams_supported); static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type) { @@ -2114,9 +2162,9 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw, int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) { int ret = 0; - int i = 0; struct drm_dp_mst_branch *mstb = NULL; + mutex_lock(&mgr->payload_lock); mutex_lock(&mgr->lock); if (mst_state == mgr->mst_state) goto out_unlock; @@ -2175,25 +2223,18 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms /* this can fail if the device is gone */ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); ret = 0; - mutex_lock(&mgr->payload_lock); - memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); + memset(mgr->payloads, 0, + mgr->max_payloads * sizeof(mgr->payloads[0])); + memset(mgr->proposed_vcpis, 0, + mgr->max_payloads * sizeof(mgr->proposed_vcpis[0])); mgr->payload_mask = 0; set_bit(0, &mgr->payload_mask); - for (i = 0; i < mgr->max_payloads; i++) { - struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; - - if (vcpi) { - vcpi->vcpi = 0; - 
vcpi->num_slots = 0; - } - mgr->proposed_vcpis[i] = NULL; - } mgr->vcpi_mask = 0; - mutex_unlock(&mgr->payload_lock); } out_unlock: mutex_unlock(&mgr->lock); + mutex_unlock(&mgr->payload_lock); if (mstb) drm_dp_put_mst_branch_device(mstb); return ret; @@ -2555,6 +2596,20 @@ bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, } EXPORT_SYMBOL(drm_dp_mst_port_has_audio); +bool drm_dp_mst_has_fec(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ + bool ret = false; + + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return ret; + ret = port->fec_capable; + drm_dp_put_port(port); + return ret; +} +EXPORT_SYMBOL_GPL(drm_dp_mst_has_fec); + /** * drm_dp_mst_get_edid() - get EDID for an MST port * @connector: toplevel connector to get EDID for diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index f5926bf5dabd9a80d1d71b596aa550d07c8a2db2..0854d1e70ce72ef4702bff8aecefc50270d78d2d 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -98,6 +98,14 @@ struct detailed_mode_closure { #define LEVEL_GTF2 2 #define LEVEL_CVT 3 +/*Enum storing luminance types for HDR blocks in EDID*/ +enum luminance_value { + NO_LUMINANCE_DATA = 3, + MAXIMUM_LUMINANCE = 4, + FRAME_AVERAGE_LUMINANCE = 5, + MINIMUM_LUMINANCE = 6 +}; + static const struct edid_quirk { char vendor[4]; int product_id; @@ -2897,15 +2905,20 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, return closure.modes; } - +#define VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK 0x0 #define AUDIO_BLOCK 0x01 #define VIDEO_BLOCK 0x02 #define VENDOR_BLOCK 0x03 #define SPEAKER_BLOCK 0x04 +#define COLORIMETRY_EXTENDED_DATA_BLOCK 0x05 +#define HDR_STATIC_METADATA_BLOCK 0x6 #define USE_EXTENDED_TAG 0x07 #define EXT_VIDEO_CAPABILITY_BLOCK 0x00 #define EXT_VIDEO_DATA_BLOCK_420 0x0E #define EXT_VIDEO_CAP_BLOCK_Y420CMDB 0x0F +#define VENDOR_SPECIFIC_VIDEO_DATA_BLOCK 0x01 +#define VSVDB_HDR10_PLUS_IEEE_CODE 0x90848b +#define VSVDB_HDR10_PLUS_APP_VER_MASK 0x3 #define EDID_BASIC_AUDIO (1 << 6) #define EDID_CEA_YCRCB444 (1 << 5) #define EDID_CEA_YCRCB422 (1 << 4) @@ -3956,6 +3969,288 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db) connector->audio_latency[1]); } +/* + * drm_extract_vcdb_info - Parse the HDMI Video Capability Data Block + * @connector: connector corresponding to the HDMI sink + * @db: start of the CEA vendor specific block + * + * Parses the HDMI VCDB to extract sink info for @connector. 
+ */ +static void +drm_extract_vcdb_info(struct drm_connector *connector, const u8 *db) +{ + /* + * Check if the sink specifies underscan + * support for: + * BIT 5: preferred video format + * BIT 3: IT video format + * BIT 1: CE video format + */ + + connector->pt_scan_info = + (db[2] & (BIT(4) | BIT(5))) >> 4; + connector->it_scan_info = + (db[2] & (BIT(3) | BIT(2))) >> 2; + connector->ce_scan_info = + db[2] & (BIT(1) | BIT(0)); + + DRM_DEBUG_KMS("Scan Info (pt|it|ce): (%d|%d|%d)", + (int) connector->pt_scan_info, + (int) connector->it_scan_info, + (int) connector->ce_scan_info); +} + +static void +drm_parse_vsvdb_hdr_plus(struct drm_connector *connector, const u8 *db) +{ + connector->hdr_plus_app_ver = db[5] & VSVDB_HDR10_PLUS_APP_VER_MASK; +} + +static void +drm_extract_vsvdb_info(struct drm_connector *connector, const u8 *db) +{ + u8 db_len = cea_db_payload_len(db); + u32 ieee_code = 0; + + if (db_len < 5) + return; + + /* Bytes 2-4: IEEE 24-bit code, LSB first */ + ieee_code = db[2] | (db[3] << 8) | (db[4] << 16); + DRM_DEBUG_KMS("found VSVDB with IEEE code 0x%x\n", ieee_code); + if (ieee_code == VSVDB_HDR10_PLUS_IEEE_CODE) + drm_parse_vsvdb_hdr_plus(connector, db); +} + +static bool drm_edid_is_luminance_value_present( +u32 block_length, enum luminance_value value) +{ + return block_length > NO_LUMINANCE_DATA && value <= block_length; +} + +/* + * drm_extract_clrmetry_db - Parse the HDMI colorimetry extended block + * @connector: connector corresponding to the HDMI sink + * @db: start of the HDMI colorimetry extended block + * + * Parses the HDMI colorimetry block to extract sink info for @connector. + */ +static void +drm_extract_clrmetry_db(struct drm_connector *connector, const u8 *db) +{ + + if (!db) { + DRM_ERROR("invalid db\n"); + return; + } + + /* Byte 3 Bit 0: xvYCC_601 */ + if (db[2] & BIT(0)) + connector->color_enc_fmt |= DRM_EDID_CLRMETRY_xvYCC_601; + /* Byte 3 Bit 1: xvYCC_709 */ + if (db[2] & BIT(1)) + connector->color_enc_fmt |= DRM_EDID_CLRMETRY_xvYCC_709; + /* Byte 3 Bit 2: sYCC_601 */ + if (db[2] & BIT(2)) + connector->color_enc_fmt |= DRM_EDID_CLRMETRY_sYCC_601; + /* Byte 3 Bit 3: ADBYCC_601 */ + if (db[2] & BIT(3)) + connector->color_enc_fmt |= DRM_EDID_CLRMETRY_ADBYCC_601; + /* Byte 3 Bit 4: ADB_RGB */ + if (db[2] & BIT(4)) + connector->color_enc_fmt |= DRM_EDID_CLRMETRY_ADB_RGB; + /* Byte 3 Bit 5: BT2020_CYCC */ + if (db[2] & BIT(5)) + connector->color_enc_fmt |= DRM_EDID_CLRMETRY_BT2020_CYCC; + /* Byte 3 Bit 6: BT2020_YCC */ + if (db[2] & BIT(6)) + connector->color_enc_fmt |= DRM_EDID_CLRMETRY_BT2020_YCC; + /* Byte 3 Bit 7: BT2020_RGB */ + if (db[2] & BIT(7)) + connector->color_enc_fmt |= DRM_EDID_CLRMETRY_BT2020_RGB; + /* Byte 4 Bit 7: DCI-P3 */ + if (db[3] & BIT(7)) + connector->color_enc_fmt |= DRM_EDID_CLRMETRY_DCI_P3; + + DRM_DEBUG_KMS("colorimetry fmts = 0x%x\n", connector->color_enc_fmt); +} + +/* + * drm_extract_hdr_db - Parse the HDMI HDR extended block + * @connector: connector corresponding to the HDMI sink + * @db: start of the HDMI HDR extended block + * + * Parses the HDMI HDR extended block to extract sink info for @connector. 
+ */ +static void +drm_extract_hdr_db(struct drm_connector *connector, const u8 *db) +{ + + u8 len = 0; + + if (!db) + return; + + len = db[0] & 0x1f; + /* Byte 3: Electro-Optical Transfer Functions */ + connector->hdr_eotf = db[2] & 0x3F; + + /* Byte 4: Static Metadata Descriptor Type 1 */ + connector->hdr_metadata_type_one = (db[3] & BIT(0)); + + /* Byte 5: Desired Content Maximum Luminance */ + if (drm_edid_is_luminance_value_present(len, MAXIMUM_LUMINANCE)) + connector->hdr_max_luminance = + db[MAXIMUM_LUMINANCE]; + + /* Byte 6: Desired Content Max Frame-average Luminance */ + if (drm_edid_is_luminance_value_present(len, FRAME_AVERAGE_LUMINANCE)) + connector->hdr_avg_luminance = + db[FRAME_AVERAGE_LUMINANCE]; + + /* Byte 7: Desired Content Min Luminance */ + if (drm_edid_is_luminance_value_present(len, MINIMUM_LUMINANCE)) + connector->hdr_min_luminance = + db[MINIMUM_LUMINANCE]; + + connector->hdr_supported = true; + + DRM_DEBUG_KMS("HDR electro-optical %d\n", connector->hdr_eotf); + DRM_DEBUG_KMS("metadata desc 1 %d\n", connector->hdr_metadata_type_one); + DRM_DEBUG_KMS("max luminance %d\n", connector->hdr_max_luminance); + DRM_DEBUG_KMS("avg luminance %d\n", connector->hdr_avg_luminance); + DRM_DEBUG_KMS("min luminance %d\n", connector->hdr_min_luminance); +} +/* + * drm_hdmi_extract_extended_blk_info - Parse the HDMI extended tag blocks + * @connector: connector corresponding to the HDMI sink + * @edid: handle to the EDID structure + * Parses the all extended tag blocks extract sink info for @connector. + */ +static void +drm_hdmi_extract_extended_blk_info(struct drm_connector *connector, + const struct edid *edid) +{ + const u8 *cea = drm_find_cea_extension(edid); + const u8 *db = NULL; + + if (cea && cea_revision(cea) >= 3) { + int i, start, end; + + if (cea_db_offsets(cea, &start, &end)) + return; + + for_each_cea_db(cea, i, start, end) { + db = &cea[i]; + + if (cea_db_tag(db) == USE_EXTENDED_TAG) { + DRM_DEBUG_KMS("found extended tag block = %d\n", + db[1]); + switch (db[1]) { + case VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK: + drm_extract_vcdb_info(connector, db); + break; + case VENDOR_SPECIFIC_VIDEO_DATA_BLOCK: + drm_extract_vsvdb_info(connector, db); + break; + case HDR_STATIC_METADATA_BLOCK: + drm_extract_hdr_db(connector, db); + break; + case COLORIMETRY_EXTENDED_DATA_BLOCK: + drm_extract_clrmetry_db(connector, db); + break; + default: + break; + } + } + } + } +} + +static void +parse_hdmi_hf_vsdb(struct drm_connector *connector, const u8 *db) +{ + u8 len = cea_db_payload_len(db); + + if (len < 7) + return; + + if (db[4] != 1) + return; /* invalid version */ + + connector->max_tmds_char = db[5] * 5; + connector->scdc_present = db[6] & (1 << 7); + connector->rr_capable = db[6] & (1 << 6); + connector->flags_3d = db[6] & 0x7; + connector->supports_scramble = connector->scdc_present && + (db[6] & (1 << 3)); + + DRM_DEBUG_KMS( + "HDMI v2: max TMDS char %d, scdc %s, rr %s,3D flags 0x%x, scramble %s\n", + connector->max_tmds_char, + connector->scdc_present ? "available" : "not available", + connector->rr_capable ? "capable" : "not capable", + connector->flags_3d, + connector->supports_scramble ? 
"supported" : "not supported"); +} + +static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector, + const u8 *db) +{ + u8 dc_mask; + struct drm_hdmi_info *hdmi = &connector->display_info.hdmi; + + dc_mask = db[7] & DRM_EDID_YCBCR420_DC_MASK; + hdmi->y420_dc_modes = dc_mask; +} + +static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector, + const u8 *hf_vsdb) +{ + struct drm_display_info *display = &connector->display_info; + struct drm_hdmi_info *hdmi = &display->hdmi; + + display->has_hdmi_infoframe = true; + + if (hf_vsdb[6] & 0x80) { + hdmi->scdc.supported = true; + if (hf_vsdb[6] & 0x40) + hdmi->scdc.read_request = true; + } + + /* + * All HDMI 2.0 monitors must support scrambling at rates > 340 MHz. + * And as per the spec, three factors confirm this: + * * Availability of a HF-VSDB block in EDID (check) + * * Non zero Max_TMDS_Char_Rate filed in HF-VSDB (let's check) + * * SCDC support available (let's check) + * Lets check it out. + */ + + if (hf_vsdb[5]) { + /* max clock is 5000 KHz times block value */ + u32 max_tmds_clock = hf_vsdb[5] * 5000; + struct drm_scdc *scdc = &hdmi->scdc; + + if (max_tmds_clock > 340000) { + display->max_tmds_clock = max_tmds_clock; + DRM_DEBUG_KMS("HF-VSDB: max TMDS clock %d kHz\n", + display->max_tmds_clock); + } + + if (scdc->supported) { + scdc->scrambling.supported = true; + + /* Few sinks support scrambling for cloks < 340M */ + if ((hf_vsdb[6] & 0x8)) + scdc->scrambling.low_rates = true; + } + } + + drm_parse_ycbcr420_deep_color_info(connector, hf_vsdb); + parse_hdmi_hf_vsdb(connector, hf_vsdb); +} + static void monitor_name(struct detailed_timing *t, void *data) { @@ -4088,6 +4383,10 @@ static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) /* HDMI Vendor-Specific Data Block */ if (cea_db_is_hdmi_vsdb(db)) drm_parse_hdmi_vsdb_audio(connector, db); + /* HDMI Forum Vendor-Specific Data Block */ + else if (cea_db_is_hdmi_forum_vsdb(db)) + drm_parse_hdmi_forum_vsdb(connector, + db); break; default: break; @@ -4401,62 +4700,6 @@ drm_default_rgb_quant_range(const struct drm_display_mode *mode) } EXPORT_SYMBOL(drm_default_rgb_quant_range); -static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector, - const u8 *db) -{ - u8 dc_mask; - struct drm_hdmi_info *hdmi = &connector->display_info.hdmi; - - dc_mask = db[7] & DRM_EDID_YCBCR420_DC_MASK; - hdmi->y420_dc_modes = dc_mask; -} - -static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector, - const u8 *hf_vsdb) -{ - struct drm_display_info *display = &connector->display_info; - struct drm_hdmi_info *hdmi = &display->hdmi; - - display->has_hdmi_infoframe = true; - - if (hf_vsdb[6] & 0x80) { - hdmi->scdc.supported = true; - if (hf_vsdb[6] & 0x40) - hdmi->scdc.read_request = true; - } - - /* - * All HDMI 2.0 monitors must support scrambling at rates > 340 MHz. - * And as per the spec, three factors confirm this: - * * Availability of a HF-VSDB block in EDID (check) - * * Non zero Max_TMDS_Char_Rate filed in HF-VSDB (let's check) - * * SCDC support available (let's check) - * Lets check it out. 
- */ - - if (hf_vsdb[5]) { - /* max clock is 5000 KHz times block value */ - u32 max_tmds_clock = hf_vsdb[5] * 5000; - struct drm_scdc *scdc = &hdmi->scdc; - - if (max_tmds_clock > 340000) { - display->max_tmds_clock = max_tmds_clock; - DRM_DEBUG_KMS("HF-VSDB: max TMDS clock %d kHz\n", - display->max_tmds_clock); - } - - if (scdc->supported) { - scdc->scrambling.supported = true; - - /* Few sinks support scrambling for cloks < 340M */ - if ((hf_vsdb[6] & 0x8)) - scdc->scrambling.low_rates = true; - } - } - - drm_parse_ycbcr420_deep_color_info(connector, hf_vsdb); -} - static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector, const u8 *hdmi) { @@ -4600,6 +4843,40 @@ drm_reset_display_info(struct drm_connector *connector) info->non_desktop = 0; } +static void +drm_hdmi_extract_vsdbs_info(struct drm_connector *connector, + const struct edid *edid) +{ + const u8 *cea = drm_find_cea_extension(edid); + const u8 *db = NULL; + + if (cea && cea_revision(cea) >= 3) { + int i, start, end; + + if (cea_db_offsets(cea, &start, &end)) + return; + + for_each_cea_db(cea, i, start, end) { + db = &cea[i]; + + if (cea_db_tag(db) == VENDOR_BLOCK) { + /* HDMI Vendor-Specific Data Block */ + if (cea_db_is_hdmi_vsdb(db)) { + drm_parse_hdmi_vsdb_video( + connector, db); + drm_parse_hdmi_vsdb_audio( + connector, db); + } + /* HDMI Forum Vendor-Specific Data Block */ + else if (cea_db_is_hdmi_forum_vsdb(db)) + drm_parse_hdmi_forum_vsdb(connector, + db); + } + } + } +} + + u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid) { struct drm_display_info *info = &connector->display_info; @@ -4637,6 +4914,11 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi connector->name, info->bpc); } + /* Extract audio and video latency fields for the sink */ + drm_hdmi_extract_vsdbs_info(connector, edid); + /* Extract info from extended tag blocks */ + drm_hdmi_extract_extended_blk_info(connector, edid); + /* Only defined for 1.4 with digital displays */ if (edid->revision < 4) return quirks; @@ -4706,7 +4988,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d struct drm_display_mode *mode; unsigned pixel_clock = (timings->pixel_clock[0] | (timings->pixel_clock[1] << 8) | - (timings->pixel_clock[2] << 16)); + (timings->pixel_clock[2] << 16)) + 1; unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1; unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1; unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1; diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 334addaca9c545ad0bdfbe86fa8ec9560afac16e..b884194f260c27a9a57609b653b9d663f21dccab 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -46,6 +46,8 @@ /* from BKL pushdown */ DEFINE_MUTEX(drm_global_mutex); +#define MAX_DRM_OPEN_COUNT 128 + /** * DOC: file operations * @@ -262,6 +264,18 @@ void drm_file_free(struct drm_file *file) kfree(file); } +static void drm_close_helper(struct file *filp) +{ + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev = file_priv->minor->dev; + + mutex_lock(&dev->filelist_mutex); + list_del(&file_priv->lhead); + mutex_unlock(&dev->filelist_mutex); + + drm_file_free(file_priv); +} + static int drm_setup(struct drm_device * dev) { int ret; @@ -310,6 +324,11 @@ int drm_open(struct inode *inode, struct file *filp) if (!dev->open_count++) need_setup = 1; + if (dev->open_count >= MAX_DRM_OPEN_COUNT) { + retcode = 
-EPERM; + goto err_undo; + } + /* share address_space across all char-devs of a single device */ filp->f_mapping = dev->anon_inode->i_mapping; @@ -318,8 +337,10 @@ int drm_open(struct inode *inode, struct file *filp) goto err_undo; if (need_setup) { retcode = drm_setup(dev); - if (retcode) + if (retcode) { + drm_close_helper(filp); goto err_undo; + } } return 0; @@ -473,11 +494,7 @@ int drm_release(struct inode *inode, struct file *filp) DRM_DEBUG("open_count = %d\n", dev->open_count); - mutex_lock(&dev->filelist_mutex); - list_del(&file_priv->lhead); - mutex_unlock(&dev->filelist_mutex); - - drm_file_free(file_priv); + drm_close_helper(filp); if (!--dev->open_count) drm_lastclose(dev); diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index b64a6ffc0aed72c6429e81b68527bb60b4a447a7..2fefca4029d1d078b464dff5c21d0767d4162f73 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -278,7 +278,8 @@ drm_internal_framebuffer_create(struct drm_device *dev, struct drm_framebuffer *fb; int ret; - if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) { + if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS | + DRM_MODE_FB_SECURE)) { DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index aa6b551c7c51f1e42bbe4013b78ca554b0c1329e..8f20553d05a43b92b017bbf29bce17e3a5eb8286 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -321,7 +321,12 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv) case DRM_CLIENT_CAP_ATOMIC: if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) return -EINVAL; - if (req->value > 1) + /* The modesetting DDX has a totally broken idea of atomic. */ + if (current->comm[0] == 'X' && req->value == 1) { + pr_info("broken atomic modeset userspace detected, disabling atomic\n"); + return -EOPNOTSUPP; + } + if (req->value > 2) return -EINVAL; file_priv->atomic = req->value; file_priv->universal_planes = req->value; diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 80b75501f5c6a203d7967fffdb94378b89039dab..e730c6d26bd26e121ec2ead22d08a56e0b2991b4 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -360,6 +360,7 @@ static ssize_t mipi_dsi_device_transfer(struct mipi_dsi_device *dsi, if (dsi->mode_flags & MIPI_DSI_MODE_LPM) msg->flags |= MIPI_DSI_MSG_USE_LPM; + msg->flags |= MIPI_DSI_MSG_LASTCOMMAND; return ops->transfer(dsi->host, msg); } @@ -1051,17 +1052,33 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline); * display * @dsi: DSI peripheral device * @brightness: brightness value + * @num_params: Number of parameters (bytes) to encode brightness value in. The + * MIPI specification states that one parameter shall be sent for + * devices that support 8-bit brightness levels. For devices that + * support brightness levels wider than 8-bit, two parameters + * shall be sent. * * Return: 0 on success or a negative error code on failure. 
*/ int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi, - u16 brightness) + u16 brightness, size_t num_params) { - u8 payload[2] = { brightness & 0xff, brightness >> 8 }; + u8 payload[2]; ssize_t err; + switch (num_params) { + case 1: + payload[0] = brightness & 0xff; + break; + case 2: + payload[0] = brightness >> 8; + payload[1] = brightness & 0xff; + break; + default: + return -EINVAL; + } err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, - payload, sizeof(payload)); + payload, num_params); if (err < 0) return err; @@ -1074,16 +1091,25 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness); * of the display * @dsi: DSI peripheral device * @brightness: brightness value + * @num_params: Number of parameters (i.e. bytes) the brightness value is + * encoded in. The MIPI specification states that one parameter + * shall be returned from devices that support 8-bit brightness + * levels. Devices that support brightness levels wider than + * 8-bit return two parameters (i.e. bytes). * * Return: 0 on success or a negative error code on failure. */ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi, - u16 *brightness) + u16 *brightness, size_t num_params) { + u8 payload[2]; ssize_t err; + if (!(num_params == 1 || num_params == 2)) + return -EINVAL; + err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, - brightness, sizeof(*brightness)); + payload, num_params); if (err <= 0) { if (err == 0) err = -ENODATA; @@ -1091,6 +1117,15 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi, return err; } + switch (num_params) { + case 1: + *brightness = payload[0]; + break; + case 2: + *brightness = (payload[0] << 8) | payload[1]; + break; + } + return 0; } EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index a3104d79b48f07d229264d050f993e515d78e396..7550435c3ac5e63232716de84a54a7c474fdfe1f 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1753,6 +1753,7 @@ int drm_mode_convert_umode(struct drm_device *dev, return 0; } +EXPORT_SYMBOL_GPL(drm_mode_convert_umode); /** * drm_mode_is_420_only - if a given videomode can be only supported in YCBCR420 diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index 1d9a9d2fe0e098c4f00b59816b92d262c9620073..d37b83f40efc54641c733e116ff26e219867d399 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -48,6 +48,7 @@ static LIST_HEAD(panel_list); void drm_panel_init(struct drm_panel *panel) { INIT_LIST_HEAD(&panel->list); + BLOCKING_INIT_NOTIFIER_HEAD(&panel->nh); } EXPORT_SYMBOL(drm_panel_init); @@ -169,6 +170,27 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np) EXPORT_SYMBOL(of_drm_find_panel); #endif +int drm_panel_notifier_register(struct drm_panel *panel, + struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&panel->nh, nb); +} +EXPORT_SYMBOL_GPL(drm_panel_notifier_register); + +int drm_panel_notifier_unregister(struct drm_panel *panel, + struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&panel->nh, nb); +} +EXPORT_SYMBOL_GPL(drm_panel_notifier_unregister); + +int drm_panel_notifier_call_chain(struct drm_panel *panel, + unsigned long val, void *v) +{ + return blocking_notifier_call_chain(&panel->nh, val, v); +} +EXPORT_SYMBOL_GPL(drm_panel_notifier_call_chain); + MODULE_AUTHOR("Thierry Reding "); MODULE_DESCRIPTION("DRM panel infrastructure"); MODULE_LICENSE("GPL and additional rights"); diff
--git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 896e42a34895da226585977d512a89f0677234da..d89a992829beb35514ae0a6e88529cae120db657 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -46,8 +46,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) { drm_dma_handle_t *dmah; - unsigned long addr; - size_t sz; /* pci_alloc_consistent only guarantees alignment to the smallest * PAGE_SIZE order which is greater than or equal to the requested size. @@ -61,22 +59,13 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali return NULL; dmah->size = size; - dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP); + dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL); if (dmah->vaddr == NULL) { kfree(dmah); return NULL; } - memset(dmah->vaddr, 0, size); - - /* XXX - Is virt_to_page() legal for consistent mem? */ - /* Reserve */ - for (addr = (unsigned long)dmah->vaddr, sz = size; - sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { - SetPageReserved(virt_to_page((void *)addr)); - } - return dmah; } @@ -89,19 +78,9 @@ EXPORT_SYMBOL(drm_pci_alloc); */ void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) { - unsigned long addr; - size_t sz; - - if (dmah->vaddr) { - /* XXX - Is virt_to_page() legal for consistent mem? */ - /* Unreserve */ - for (addr = (unsigned long)dmah->vaddr, sz = dmah->size; - sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { - ClearPageReserved(virt_to_page((void *)addr)); - } + if (dmah->vaddr) dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr, dmah->busaddr); - } } /** diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 1c32b7070fed39450c20edb310fc5a4ba98d349f..2f09f3473375ee1cb4d903386834c72163f1d26d 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -433,6 +433,28 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) } EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); +/** + * drm_gem_dmabuf_get_uuid - dma_buf get_uuid implementation for GEM + * @dma_buf: buffer to query + * @uuid: uuid outparam + * + * Queries the buffer's virtio UUID. This can be used as the + * &dma_buf_ops.get_uuid callback. Calls into &drm_driver.gem_prime_get_uuid. + * + * Returns 0 on success or a negative error code on failure. 
+ */ +int drm_gem_dmabuf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid) +{ + struct drm_gem_object *obj = dma_buf->priv; + struct drm_device *dev = obj->dev; + + if (!dev->driver->gem_prime_get_uuid) + return -ENODEV; + + return dev->driver->gem_prime_get_uuid(obj, uuid); +} +EXPORT_SYMBOL(drm_gem_dmabuf_get_uuid); + /** * drm_gem_dmabuf_kmap - map implementation for GEM * @dma_buf: buffer to be mapped @@ -494,6 +516,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { .mmap = drm_gem_dmabuf_mmap, .vmap = drm_gem_dmabuf_vmap, .vunmap = drm_gem_dmabuf_vunmap, + .get_uuid = drm_gem_dmabuf_get_uuid, }; /** diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index 69dfed57c2f844de3fb475d20500a51b89434170..6532a9d4de9017e1caa2d39cd89d127545a9fad3 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -26,6 +26,9 @@ #include "drm_crtc_internal.h" +#define MAX_BLOB_PROP_SIZE (PAGE_SIZE * 30) +#define MAX_BLOB_PROP_COUNT 250 + /** * DOC: overview * @@ -556,7 +559,8 @@ drm_property_create_blob(struct drm_device *dev, size_t length, struct drm_property_blob *blob; int ret; - if (!length || length > INT_MAX - sizeof(struct drm_property_blob)) + if (!length || length > MAX_BLOB_PROP_SIZE - + sizeof(struct drm_property_blob)) return ERR_PTR(-EINVAL); blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); @@ -782,12 +786,21 @@ int drm_mode_createblob_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_create_blob *out_resp = data; - struct drm_property_blob *blob; + struct drm_property_blob *blob, *bt; int ret = 0; + u32 count = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; + mutex_lock(&dev->mode_config.blob_lock); + list_for_each_entry(bt, &file_priv->blobs, head_file) + count++; + mutex_unlock(&dev->mode_config.blob_lock); + + if (count >= MAX_BLOB_PROP_COUNT) + return -EOPNOTSUPP; + blob = drm_property_create_blob(dev, out_resp->length, NULL); if (IS_ERR(blob)) return PTR_ERR(blob); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index 7fea74861a87fdff9a30c3a313a06360093aba80..c83655b008b9f7aafab5d4624b51140b90d07409 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -311,6 +311,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, u32 return_target, return_dwords; u32 link_target, link_dwords; bool switch_context = gpu->exec_state != exec_state; + unsigned int new_flush_seq = READ_ONCE(gpu->mmu->flush_seq); + bool need_flush = gpu->flush_seq != new_flush_seq; lockdep_assert_held(&gpu->lock); @@ -325,14 +327,14 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, * need to append a mmu flush load state, followed by a new * link to this buffer - a total of four additional words. 
*/ - if (gpu->mmu->need_flush || switch_context) { + if (need_flush || switch_context) { u32 target, extra_dwords; /* link command */ extra_dwords = 1; /* flush command */ - if (gpu->mmu->need_flush) { + if (need_flush) { if (gpu->mmu->version == ETNAVIV_IOMMU_V1) extra_dwords += 1; else @@ -345,7 +347,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords); - if (gpu->mmu->need_flush) { + if (need_flush) { /* Add the MMU flush */ if (gpu->mmu->version == ETNAVIV_IOMMU_V1) { CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU, @@ -365,7 +367,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, SYNC_RECIPIENT_PE); } - gpu->mmu->need_flush = false; + gpu->flush_seq = new_flush_seq; } if (switch_context) { diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 9a75a6937268eebff62e04a13b3669fb3d5934b1..039e0509af6abf16c01aebc1fc2f038dee4ae81c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -139,6 +139,7 @@ struct etnaviv_gpu { struct etnaviv_iommu *mmu; struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc; + unsigned int flush_seq; /* Power Control: */ struct clk *clk_bus; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 8069f9f36a2ed9b269326be831a4481a1e551d99..e132dccedf88f2076240b7de746e2a7bc620dfb5 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -261,7 +261,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu, } list_add_tail(&mapping->mmu_node, &mmu->mappings); - mmu->need_flush = true; + mmu->flush_seq++; unlock: mutex_unlock(&mmu->lock); @@ -280,7 +280,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu, etnaviv_iommu_remove_mapping(mmu, mapping); list_del(&mapping->mmu_node); - mmu->need_flush = true; + mmu->flush_seq++; mutex_unlock(&mmu->lock); } @@ -357,7 +357,7 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr, mutex_unlock(&mmu->lock); return ret; } - gpu->mmu->need_flush = true; + mmu->flush_seq++; mutex_unlock(&mmu->lock); *iova = (u32)vram_node->start; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h index a0db17ffb686dae5c45c31e8744d2c9fd92dcab7..348a94d9695bb1ece669148d63b7bad18e1be552 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h @@ -48,7 +48,7 @@ struct etnaviv_iommu { struct mutex lock; struct list_head mappings; struct drm_mm mm; - bool need_flush; + unsigned int flush_seq; }; struct etnaviv_gem_object; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c index 4227a4006c34963800563838a90c067a9cee95c4..3ce77cbad4ae37f3d1a4a532737d928050aa10ec 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c @@ -4,6 +4,7 @@ * Copyright (C) 2017 Zodiac Inflight Innovations */ +#include "common.xml.h" #include "etnaviv_gpu.h" #include "etnaviv_perfmon.h" #include "state_hi.xml.h" @@ -31,17 +32,11 @@ struct etnaviv_pm_domain { }; struct etnaviv_pm_domain_meta { + unsigned int feature; const struct etnaviv_pm_domain *domains; u32 nr_domains; }; -static u32 simple_reg_read(struct etnaviv_gpu *gpu, - const struct etnaviv_pm_domain *domain, - const struct etnaviv_pm_signal *signal) -{ - return gpu_read(gpu, signal->data); -} - static u32 perf_reg_read(struct etnaviv_gpu *gpu, const struct etnaviv_pm_domain 
*domain, const struct etnaviv_pm_signal *signal) @@ -75,6 +70,34 @@ static u32 pipe_reg_read(struct etnaviv_gpu *gpu, return value; } +static u32 hi_total_cycle_read(struct etnaviv_gpu *gpu, + const struct etnaviv_pm_domain *domain, + const struct etnaviv_pm_signal *signal) +{ + u32 reg = VIVS_HI_PROFILE_TOTAL_CYCLES; + + if (gpu->identity.model == chipModel_GC880 || + gpu->identity.model == chipModel_GC2000 || + gpu->identity.model == chipModel_GC2100) + reg = VIVS_MC_PROFILE_CYCLE_COUNTER; + + return gpu_read(gpu, reg); +} + +static u32 hi_total_idle_cycle_read(struct etnaviv_gpu *gpu, + const struct etnaviv_pm_domain *domain, + const struct etnaviv_pm_signal *signal) +{ + u32 reg = VIVS_HI_PROFILE_IDLE_CYCLES; + + if (gpu->identity.model == chipModel_GC880 || + gpu->identity.model == chipModel_GC2000 || + gpu->identity.model == chipModel_GC2100) + reg = VIVS_HI_PROFILE_TOTAL_CYCLES; + + return gpu_read(gpu, reg); +} + static const struct etnaviv_pm_domain doms_3d[] = { { .name = "HI", @@ -84,13 +107,13 @@ static const struct etnaviv_pm_domain doms_3d[] = { .signal = (const struct etnaviv_pm_signal[]) { { "TOTAL_CYCLES", - VIVS_HI_PROFILE_TOTAL_CYCLES, - &simple_reg_read + 0, + &hi_total_cycle_read }, { "IDLE_CYCLES", - VIVS_HI_PROFILE_IDLE_CYCLES, - &simple_reg_read + 0, + &hi_total_idle_cycle_read }, { "AXI_CYCLES_READ_REQUEST_STALLED", @@ -388,36 +411,78 @@ static const struct etnaviv_pm_domain doms_vg[] = { static const struct etnaviv_pm_domain_meta doms_meta[] = { { + .feature = chipFeatures_PIPE_3D, .nr_domains = ARRAY_SIZE(doms_3d), .domains = &doms_3d[0] }, { + .feature = chipFeatures_PIPE_2D, .nr_domains = ARRAY_SIZE(doms_2d), .domains = &doms_2d[0] }, { + .feature = chipFeatures_PIPE_VG, .nr_domains = ARRAY_SIZE(doms_vg), .domains = &doms_vg[0] } }; +static unsigned int num_pm_domains(const struct etnaviv_gpu *gpu) +{ + unsigned int num = 0, i; + + for (i = 0; i < ARRAY_SIZE(doms_meta); i++) { + const struct etnaviv_pm_domain_meta *meta = &doms_meta[i]; + + if (gpu->identity.features & meta->feature) + num += meta->nr_domains; + } + + return num; +} + +static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu, + unsigned int index) +{ + const struct etnaviv_pm_domain *domain = NULL; + unsigned int offset = 0, i; + + for (i = 0; i < ARRAY_SIZE(doms_meta); i++) { + const struct etnaviv_pm_domain_meta *meta = &doms_meta[i]; + + if (!(gpu->identity.features & meta->feature)) + continue; + + if (meta->nr_domains < (index - offset)) { + offset += meta->nr_domains; + continue; + } + + domain = meta->domains + (index - offset); + } + + return domain; +} + int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu, struct drm_etnaviv_pm_domain *domain) { - const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe]; + const unsigned int nr_domains = num_pm_domains(gpu); const struct etnaviv_pm_domain *dom; - if (domain->iter >= meta->nr_domains) + if (domain->iter >= nr_domains) return -EINVAL; - dom = meta->domains + domain->iter; + dom = pm_domain(gpu, domain->iter); + if (!dom) + return -EINVAL; domain->id = domain->iter; domain->nr_signals = dom->nr_signals; strncpy(domain->name, dom->name, sizeof(domain->name)); domain->iter++; - if (domain->iter == meta->nr_domains) + if (domain->iter == nr_domains) domain->iter = 0xff; return 0; @@ -426,14 +491,16 @@ int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu, int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu, struct drm_etnaviv_pm_signal *signal) { - const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe]; + 
const unsigned int nr_domains = num_pm_domains(gpu); const struct etnaviv_pm_domain *dom; const struct etnaviv_pm_signal *sig; - if (signal->domain >= meta->nr_domains) + if (signal->domain >= nr_domains) return -EINVAL; - dom = meta->domains + signal->domain; + dom = pm_domain(gpu, signal->domain); + if (!dom) + return -EINVAL; if (signal->iter >= dom->nr_signals) return -EINVAL; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index f59ca27a4a357492f96d0b7e37c76536037b40d6..7c0b30c955c39d491a01a02672d7034d34d41319 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -43,6 +43,46 @@ static bool use_pages(struct drm_gem_object *obj) return !msm_obj->vram_node; } +/* + * Cache sync.. this is a bit over-complicated, to fit dma-mapping + * API. Really GPU cache is out of scope here (handled on cmdstream) + * and all we need to do is invalidate newly allocated pages before + * mapping to CPU as uncached/writecombine. + * + * On top of this, we have the added headache, that depending on + * display generation, the display's iommu may be wired up to either + * the toplevel drm device (mdss), or to the mdp sub-node, meaning + * that here we either have dma-direct or iommu ops. + * + * Let this be a cautionary tail of abstraction gone wrong. + */ + +static void sync_for_device(struct msm_gem_object *msm_obj) +{ + struct device *dev = msm_obj->base.dev->dev; + + if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) { + dma_sync_sg_for_device(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } else { + dma_map_sg(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } +} + +static void sync_for_cpu(struct msm_gem_object *msm_obj) +{ + struct device *dev = msm_obj->base.dev->dev; + + if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) { + dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sg(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } +} + /* allocate pages from VRAM carveout, used when no IOMMU: */ static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) { @@ -108,8 +148,7 @@ static struct page **get_pages(struct drm_gem_object *obj) * because display controller, GPU, etc. are not coherent: */ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_map_sg(dev->dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + sync_for_device(msm_obj); } return msm_obj->pages; @@ -138,9 +177,7 @@ static void put_pages(struct drm_gem_object *obj) * GPU, etc. 
are not coherent: */ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, - DMA_BIDIRECTIONAL); + sync_for_cpu(msm_obj); sg_free_table(msm_obj->sgt); kfree(msm_obj->sgt); diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c index 99caa7835e7b11d0be01b6aa5a0bbb3268cbd4ba..5d21f7b6ba92abd848a51e45ad57d8ce4a525ee8 100644 --- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c +++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c @@ -341,7 +341,7 @@ static int dsi_dcs_bl_get_brightness(struct backlight_device *bl) dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness); + ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness, 1); if (ret < 0) return ret; @@ -357,7 +357,7 @@ static int dsi_dcs_bl_update_status(struct backlight_device *bl) dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; - ret = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness); + ret = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness, 1); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c index aeb32aa588991cfc08d1a1cf6d4578f908135932..7e047aa7d20f27858c0aedd2e8b555e94c0cc682 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c @@ -365,7 +365,7 @@ static int s6e63j0x03_enable(struct drm_panel *panel) return ret; /* set default white brightness */ - ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff); + ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff, 1); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 208af9f379144d09fa4dba4ee9f98f3745106cc9..5177e37d81e65fdb9377386397b0b4ea09fe9698 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -472,9 +472,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, return ret; ret = qxl_release_reserve_list(release, true); - if (ret) + if (ret) { + qxl_release_free(qdev, release); return ret; - + } cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_SURFACE_CMD_CREATE; cmd->flags = QXL_SURF_FLAG_KEEP_DATA; @@ -500,8 +501,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, /* no need to add a release to the fence for this surface bo, since it is only released when we ask to destroy the surface and it would never signal otherwise */ - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); surf->hw_surf_alloc = true; spin_lock(&qdev->surf_id_idr_lock); @@ -543,9 +544,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev, cmd->surface_id = id; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); - qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 0570c6826bff40911b120cb1a99e56a3db5ac421..769e12b4c13c58be10572003930ad0459da771df 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -532,8 +532,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane) cmd->u.set.visible = 1; qxl_release_unmap(qdev, release, &cmd->release_info); - 
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); return ret; @@ -694,8 +694,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, cmd->u.position.y = plane->state->crtc_y + fb->hot_y; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); if (old_cursor_bo) qxl_bo_unref(&old_cursor_bo); @@ -740,8 +740,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane, cmd->type = QXL_CURSOR_HIDE; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); } static int qxl_plane_prepare_fb(struct drm_plane *plane, diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 4d8681e84e684f58fa5aec6e1da3c44818befb0e..d009f2bc28e93d2efd4a77770b623ab99cf58d82 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -241,8 +241,8 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, qxl_bo_physical_address(qdev, dimage->bo, 0); qxl_release_unmap(qdev, release, &drawable->release_info); - qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); out_free_palette: if (palette_bo) @@ -348,9 +348,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, goto out_release_backoff; rects = drawable_set_clipping(qdev, num_clips, clips_bo); - if (!rects) + if (!rects) { + ret = -EINVAL; goto out_release_backoff; - + } drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); drawable->clip.type = SPICE_CLIP_TYPE_RECTS; @@ -381,8 +382,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, } qxl_bo_kunmap(clips_bo); - qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); out_release_backoff: if (ret) @@ -432,8 +433,8 @@ void qxl_draw_copyarea(struct qxl_device *qdev, drawable->u.copy_bits.src_pos.y = sy; qxl_release_unmap(qdev, release, &drawable->release_info); - qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); out_free_release: if (ret) @@ -476,8 +477,8 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) qxl_release_unmap(qdev, release, &drawable->release_info); - qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); out_free_release: if (ret) diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 6cc9f3367fa05581a90280b7cc111259cc2692fa..5536b2f7b7b00c9b31d0ff46b6f9506e30d9ec05 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -257,11 +257,8 @@ static int qxl_process_single_command(struct qxl_device *qdev, apply_surf_reloc(qdev, &reloc_info[i]); } + qxl_release_fence_buffer_objects(release); ret = qxl_push_command_ring_release(qdev, release, cmd->type, true); - if (ret) - 
qxl_release_backoff_reserve_list(release); - else - qxl_release_fence_buffer_objects(release); out_free_bos: out_free_release: diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index fd5522fd179e56399c63ce819e0f2a3580bc9ec1..86b98856756d9d15f5c3cabe7cc3fb89db9b48a9 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -698,11 +698,23 @@ static enum drm_mode_status vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc, const struct drm_display_mode *mode) { - /* HSM clock must be 108% of the pixel clock. Additionally, - * the AXI clock needs to be at least 25% of pixel clock, but - * HSM ends up being the limiting factor. + /* + * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must + * be faster than pixel clock, infinitesimally faster, tested in + * simulation. Otherwise, exact value is unimportant for HDMI + * operation." This conflicts with bcm2835's vc4 documentation, which + * states HSM's clock has to be at least 108% of the pixel clock. + * + * Real life tests reveal that vc4's firmware statement holds up, and + * users are able to use pixel clocks closer to HSM's, namely for + * 1920x1200@60Hz. So it was decided to have leave a 1% margin between + * both clocks. Which, for RPi0-3 implies a maximum pixel clock of + * 162MHz. + * + * Additionally, the AXI clock needs to be at least 25% of + * pixel clock, but HSM ends up being the limiting factor. */ - if (mode->clock > HSM_CLOCK_FREQ / (1000 * 108 / 100)) + if (mode->clock > HSM_CLOCK_FREQ / (1000 * 101 / 100)) return MODE_CLOCK_HIGH; return MODE_OK; diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile index 11e25e9a4c45380200fcfa0815933751e3939c0a..42949a17ff701ec10853a23e98e165e4869b704a 100644 --- a/drivers/gpu/drm/virtio/Makefile +++ b/drivers/gpu/drm/virtio/Makefile @@ -3,7 +3,7 @@ # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. -virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o virtgpu_gem.o \ +virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \ virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \ virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \ virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index d168fb682ac6a3a4eec75754fd21624e5e1bec98..9bef800d907341796314b4e69699ae172b49e62d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -354,7 +354,7 @@ static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) +void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) { int i; @@ -372,7 +372,6 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) vgdev_output_init(vgdev, i); drm_mode_config_reset(vgdev->ddev); - return 0; } void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev) diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c deleted file mode 100644 index b61e3235a8260d17c0081c91183b67e89140dcfd..0000000000000000000000000000000000000000 --- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright (C) 2015 Red Hat, Inc. - * All Rights Reserved. 
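A quick back-of-the-envelope check of the vc4_hdmi mode_valid change earlier in this hunk, assuming the HSM_CLOCK_FREQ definition used by this generation of the driver (163682864 Hz; the constant is not shown in the hunk, so treat it as an assumption). Illustration only, not part of the series:

#include <stdio.h>

/*
 * Sketch: how the 101% margin in vc4_hdmi_encoder_mode_valid() translates
 * into a pixel-clock ceiling. DRM expresses mode->clock in kHz.
 */
int main(void)
{
	const unsigned long hsm_clock_freq = 163682864;			/* assumed Hz value */
	unsigned long old_limit = hsm_clock_freq / (1000 * 108 / 100);	/* 151558 kHz */
	unsigned long new_limit = hsm_clock_freq / (1000 * 101 / 100);	/* 162062 kHz */

	/*
	 * 1920x1200@60 (CVT-RB) needs roughly 154000 kHz: above the old
	 * ceiling, comfortably below the new ~162 MHz one.
	 */
	printf("old %lu kHz, new %lu kHz\n", old_limit, new_limit);
	return 0;
}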
- * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include - -#include "virtgpu_drv.h" - -static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev) -{ - struct apertures_struct *ap; - bool primary; - - ap = alloc_apertures(1); - if (!ap) - return; - - ap->ranges[0].base = pci_resource_start(pci_dev, 0); - ap->ranges[0].size = pci_resource_len(pci_dev, 0); - - primary = pci_dev->resource[PCI_ROM_RESOURCE].flags - & IORESOURCE_ROM_SHADOW; - - drm_fb_helper_remove_conflicting_framebuffers(ap, "virtiodrmfb", primary); - - kfree(ap); -} - -int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev) -{ - struct drm_device *dev; - int ret; - - dev = drm_dev_alloc(driver, &vdev->dev); - if (IS_ERR(dev)) - return PTR_ERR(dev); - vdev->priv = dev; - - if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) { - struct pci_dev *pdev = to_pci_dev(vdev->dev.parent); - const char *pname = dev_name(&pdev->dev); - bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA; - char unique[20]; - - DRM_INFO("pci: %s detected at %s\n", - vga ? "virtio-vga" : "virtio-gpu-pci", - pname); - dev->pdev = pdev; - if (vga) - virtio_pci_kick_out_firmware_fb(pdev); - - /* - * Normally the drm_dev_set_unique() call is done by core DRM. - * The following comment covers, why virtio cannot rely on it. - * - * Unlike the other virtual GPU drivers, virtio abstracts the - * underlying bus type by using struct virtio_device. - * - * Hence the dev_is_pci() check, used in core DRM, will fail - * and the unique returned will be the virtio_device "virtio0", - * while a "pci:..." one is required. - * - * A few other ideas were considered: - * - Extend the dev_is_pci() check [in drm_set_busid] to - * consider virtio. - * Seems like a bigger hack than what we have already. - * - * - Point drm_device::dev to the parent of the virtio_device - * Semantic changes: - * * Using the wrong device for i2c, framebuffer_alloc and - * prime import. - * Visual changes: - * * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer, - * will print the wrong information. - * - * We could address the latter issues, by introducing - * drm_device::bus_dev, ... which would be used solely for this. - * - * So for the moment keep things as-is, with a bulky comment - * for the next person who feels like removing this - * drm_dev_set_unique() quirk. 
- */ - snprintf(unique, sizeof(unique), "pci:%s", pname); - ret = drm_dev_set_unique(dev, unique); - if (ret) - goto err_free; - - } - - ret = drm_dev_register(dev, 0); - if (ret) - goto err_free; - - return 0; - -err_free: - drm_dev_put(dev); - return ret; -} diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 1fd104612ac42d27018d5fc821478ec68c609a35..541fa03a02c7ba493d79468c4bb4a86e1c34ce23 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -40,8 +40,78 @@ static int virtio_gpu_modeset = -1; MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); module_param_named(modeset, virtio_gpu_modeset, int, 0400); +static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev) +{ + struct apertures_struct *ap; + bool primary; + + ap = alloc_apertures(1); + if (!ap) + return; + + ap->ranges[0].base = pci_resource_start(pci_dev, 0); + ap->ranges[0].size = pci_resource_len(pci_dev, 0); + + primary = pci_dev->resource[PCI_ROM_RESOURCE].flags + & IORESOURCE_ROM_SHADOW; + + drm_fb_helper_remove_conflicting_framebuffers(ap, "virtiodrmfb", primary); + + kfree(ap); +} + +static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vdev) +{ + struct pci_dev *pdev = to_pci_dev(vdev->dev.parent); + const char *pname = dev_name(&pdev->dev); + bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA; + char unique[20]; + + DRM_INFO("pci: %s detected at %s\n", + vga ? "virtio-vga" : "virtio-gpu-pci", + pname); + dev->pdev = pdev; + if (vga) + virtio_pci_kick_out_firmware_fb(pdev); + + /* + * Normally the drm_dev_set_unique() call is done by core DRM. + * The following comment covers, why virtio cannot rely on it. + * + * Unlike the other virtual GPU drivers, virtio abstracts the + * underlying bus type by using struct virtio_device. + * + * Hence the dev_is_pci() check, used in core DRM, will fail + * and the unique returned will be the virtio_device "virtio0", + * while a "pci:..." one is required. + * + * A few other ideas were considered: + * - Extend the dev_is_pci() check [in drm_set_busid] to + * consider virtio. + * Seems like a bigger hack than what we have already. + * + * - Point drm_device::dev to the parent of the virtio_device + * Semantic changes: + * * Using the wrong device for i2c, framebuffer_alloc and + * prime import. + * Visual changes: + * * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer, + * will print the wrong information. + * + * We could address the latter issues, by introducing + * drm_device::bus_dev, ... which would be used solely for this. + * + * So for the moment keep things as-is, with a bulky comment + * for the next person who feels like removing this + * drm_dev_set_unique() quirk. 
+ */ + snprintf(unique, sizeof(unique), "pci:%s", pname); + return drm_dev_set_unique(dev, unique); +} + static int virtio_gpu_probe(struct virtio_device *vdev) { + struct drm_device *dev; int ret; if (vgacon_text_force() && virtio_gpu_modeset == -1) @@ -50,18 +120,39 @@ static int virtio_gpu_probe(struct virtio_device *vdev) if (virtio_gpu_modeset == 0) return -EINVAL; - ret = drm_virtio_init(&driver, vdev); + dev = drm_dev_alloc(&driver, &vdev->dev); + if (IS_ERR(dev)) + return PTR_ERR(dev); + vdev->priv = dev; + + if (!strcmp(vdev->dev.parent->bus->name, "pci")) { + ret = virtio_gpu_pci_quirk(dev, vdev); + if (ret) + goto err_free; + } + + ret = virtio_gpu_init(dev); + if (ret) + goto err_free; + + ret = drm_dev_register(dev, 0); if (ret) - return ret; + goto err_free; drm_fbdev_generic_setup(vdev->priv, 32); return 0; + +err_free: + drm_dev_put(dev); + return ret; } static void virtio_gpu_remove(struct virtio_device *vdev) { struct drm_device *dev = vdev->priv; + drm_dev_unregister(dev); + virtio_gpu_deinit(dev); drm_put_dev(dev); } @@ -123,8 +214,6 @@ static const struct file_operations virtio_gpu_driver_fops = { static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC, - .load = virtio_gpu_driver_load, - .unload = virtio_gpu_driver_unload, .open = virtio_gpu_driver_open, .postclose = virtio_gpu_driver_postclose, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index a9aa0236e3ddd4111305284b32be1b43f58c7c65..d8e98cdde911115ef7e09748fc15a5b55ac2b39c 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -50,9 +50,6 @@ #define DRIVER_MINOR 1 #define DRIVER_PATCHLEVEL 0 -/* virtgpu_drm_bus.c */ -int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev); - struct virtio_gpu_object_params { uint32_t format; uint32_t width; @@ -231,8 +228,8 @@ int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket, void virtio_gpu_unref_list(struct list_head *head); /* virtio_kms.c */ -int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags); -void virtio_gpu_driver_unload(struct drm_device *dev); +int virtio_gpu_init(struct drm_device *dev); +void virtio_gpu_deinit(struct drm_device *dev); int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file); void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file); @@ -343,7 +340,7 @@ int virtio_gpu_framebuffer_init(struct drm_device *dev, struct virtio_gpu_framebuffer *vgfb, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); -int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); +void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); /* virtio_gpu_plane.c */ diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index c340be252fce9dc5f5802fb48aeca37752ebbdf8..84b6a6bf00c68222cfde74ec7f9d64c43b0ed9f3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -106,7 +106,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev, vgdev->num_capsets = num_capsets; } -int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) +int virtio_gpu_init(struct drm_device *dev) { static vq_callback_t *callbacks[] = { virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack @@ -193,9 +193,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) 
num_capsets, &num_capsets); DRM_INFO("number of cap sets: %d\n", num_capsets); - ret = virtio_gpu_modeset_init(vgdev); - if (ret) - goto err_modeset; + virtio_gpu_modeset_init(vgdev); virtio_device_ready(vgdev->vdev); vgdev->vqs_ready = true; @@ -209,7 +207,6 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) 5 * HZ); return 0; -err_modeset: err_scanouts: virtio_gpu_ttm_fini(vgdev); err_ttm: @@ -231,7 +228,7 @@ static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev) } } -void virtio_gpu_driver_unload(struct drm_device *dev) +void virtio_gpu_deinit(struct drm_device *dev) { struct virtio_gpu_device *vgdev = dev->dev_private; diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index aaeceb7c6fccf14f294d096a488c48aadb3cf98c..44a787f5ce794fc40f0ab2a5685e4fa70a4168c6 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -23,38 +23,44 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +#include + #include #include "virtgpu_drv.h" +static int virtio_gpu_virglrenderer_workaround = 1; +module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400); + static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid) { -#if 0 - int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); - - if (handle < 0) - return handle; -#else - static int handle; - - /* - * FIXME: dirty hack to avoid re-using IDs, virglrenderer - * can't deal with that. Needs fixing in virglrenderer, also - * should figure a better way to handle that in the guest. - */ - handle++; -#endif - - *resid = handle + 1; + if (virtio_gpu_virglrenderer_workaround) { + /* + * Hack to avoid re-using resource IDs. + * + * virglrenderer versions up to (and including) 0.7.0 + * can't deal with that. virglrenderer commit + * "f91a9dd35715 Fix unlinking resources from hash + * table." (Feb 2019) fixes the bug. + */ + static atomic_t seqno = ATOMIC_INIT(0); + int handle = atomic_inc_return(&seqno); + *resid = handle + 1; + } else { + int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); + if (handle < 0) + return handle; + *resid = handle + 1; + } return 0; } static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id) { -#if 0 - ida_free(&vgdev->resource_ida, id - 1); -#endif + if (!virtio_gpu_virglrenderer_workaround) { + ida_free(&vgdev->resource_ida, id - 1); + } } static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) diff --git a/drivers/gpu/trace/Kconfig b/drivers/gpu/trace/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..c24e9edd022e68c73e1a9ddc1ff600ef54646dbc --- /dev/null +++ b/drivers/gpu/trace/Kconfig @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config TRACE_GPU_MEM + bool diff --git a/drivers/gpu/trace/Makefile b/drivers/gpu/trace/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..b70fbdc5847f002a70df7a57d9d0da9c518bb86e --- /dev/null +++ b/drivers/gpu/trace/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_TRACE_GPU_MEM) += trace_gpu_mem.o diff --git a/drivers/gpu/trace/trace_gpu_mem.c b/drivers/gpu/trace/trace_gpu_mem.c new file mode 100644 index 0000000000000000000000000000000000000000..01e855897b6d093d4cd84cc1df13194121baec4d --- /dev/null +++ b/drivers/gpu/trace/trace_gpu_mem.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * GPU memory trace points + * + * Copyright (C) 2020 Google, Inc. 
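One note on the virtgpu_object.c hunk above: the new module parameter only switches between two allocation behaviours. A minimal sketch of the difference, with illustrative names rather than the patch's own:

#include <linux/atomic.h>
#include <linux/idr.h>
#include <linux/gfp.h>

/*
 * Sketch: the workaround path hands out strictly increasing IDs and never
 * recycles one, which is what virglrenderer <= 0.7.0 needs; the normal path
 * uses an IDA, which returns the smallest free ID and therefore reuses
 * freed ones.
 */
static atomic_t seqno = ATOMIC_INIT(0);
static DEFINE_IDA(resource_ida);

static int next_id_workaround(u32 *resid)
{
	*resid = atomic_inc_return(&seqno) + 1;	/* 2, 3, 4, ... never reused */
	return 0;
}

static int next_id_ida(u32 *resid)
{
	int handle = ida_alloc(&resource_ida, GFP_KERNEL);

	if (handle < 0)
		return handle;
	*resid = handle + 1;			/* freed IDs come back */
	return 0;
}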
+ */ + +#include + +#define CREATE_TRACE_POINTS +#include + +EXPORT_TRACEPOINT_SYMBOL(gpu_mem_total); diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 11103efebbaa86e5bf156de6a40544f0278026ca..1e6f8b0d00fb6d40133da56811e5696161c4fc26 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -685,16 +685,21 @@ static int usbhid_open(struct hid_device *hid) struct usbhid_device *usbhid = hid->driver_data; int res; + mutex_lock(&usbhid->mutex); + set_bit(HID_OPENED, &usbhid->iofl); - if (hid->quirks & HID_QUIRK_ALWAYS_POLL) - return 0; + if (hid->quirks & HID_QUIRK_ALWAYS_POLL) { + res = 0; + goto Done; + } res = usb_autopm_get_interface(usbhid->intf); /* the device must be awake to reliably request remote wakeup */ if (res < 0) { clear_bit(HID_OPENED, &usbhid->iofl); - return -EIO; + res = -EIO; + goto Done; } usbhid->intf->needs_remote_wakeup = 1; @@ -728,6 +733,9 @@ static int usbhid_open(struct hid_device *hid) msleep(50); clear_bit(HID_RESUME_RUNNING, &usbhid->iofl); + + Done: + mutex_unlock(&usbhid->mutex); return res; } @@ -735,6 +743,8 @@ static void usbhid_close(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; + mutex_lock(&usbhid->mutex); + /* * Make sure we don't restart data acquisition due to * a resumption we no longer care about by avoiding racing @@ -746,12 +756,13 @@ static void usbhid_close(struct hid_device *hid) clear_bit(HID_IN_POLLING, &usbhid->iofl); spin_unlock_irq(&usbhid->lock); - if (hid->quirks & HID_QUIRK_ALWAYS_POLL) - return; + if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) { + hid_cancel_delayed_stuff(usbhid); + usb_kill_urb(usbhid->urbin); + usbhid->intf->needs_remote_wakeup = 0; + } - hid_cancel_delayed_stuff(usbhid); - usb_kill_urb(usbhid->urbin); - usbhid->intf->needs_remote_wakeup = 0; + mutex_unlock(&usbhid->mutex); } /* @@ -1060,6 +1071,8 @@ static int usbhid_start(struct hid_device *hid) unsigned int n, insize = 0; int ret; + mutex_lock(&usbhid->mutex); + clear_bit(HID_DISCONNECTED, &usbhid->iofl); usbhid->bufsize = HID_MIN_BUFFER_SIZE; @@ -1180,6 +1193,8 @@ static int usbhid_start(struct hid_device *hid) usbhid_set_leds(hid); device_set_wakeup_enable(&dev->dev, 1); } + + mutex_unlock(&usbhid->mutex); return 0; fail: @@ -1190,6 +1205,7 @@ static int usbhid_start(struct hid_device *hid) usbhid->urbout = NULL; usbhid->urbctrl = NULL; hid_free_buffers(dev, hid); + mutex_unlock(&usbhid->mutex); return ret; } @@ -1205,6 +1221,8 @@ static void usbhid_stop(struct hid_device *hid) usbhid->intf->needs_remote_wakeup = 0; } + mutex_lock(&usbhid->mutex); + clear_bit(HID_STARTED, &usbhid->iofl); spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ set_bit(HID_DISCONNECTED, &usbhid->iofl); @@ -1225,6 +1243,8 @@ static void usbhid_stop(struct hid_device *hid) usbhid->urbout = NULL; hid_free_buffers(hid_to_usb_dev(hid), hid); + + mutex_unlock(&usbhid->mutex); } static int usbhid_power(struct hid_device *hid, int lvl) @@ -1385,6 +1405,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id * INIT_WORK(&usbhid->reset_work, hid_reset); timer_setup(&usbhid->io_retry, hid_retry_timeout, 0); spin_lock_init(&usbhid->lock); + mutex_init(&usbhid->mutex); ret = hid_add_device(hid); if (ret) { diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h index da9c61d54be6c9d9018431f43a44a761e070c247..caa0ee63958153a684fb4183832c3faa24b96218 100644 --- a/drivers/hid/usbhid/usbhid.h +++ b/drivers/hid/usbhid/usbhid.h @@ -93,6 +93,7 @@ struct 
usbhid_device { dma_addr_t outbuf_dma; /* Output buffer dma */ unsigned long last_out; /* record of last output for timeouts */ + struct mutex mutex; /* start/stop/open/close */ spinlock_t lock; /* fifo spinlock */ unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */ struct timer_list io_retry; /* Retry timer */ diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 3038c975e417c2e4c58fb13e5e9f815d87445f25..8249ff3a5a8d2f2c1b3acd3e532d101f51ce84e4 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -290,9 +290,11 @@ static void wacom_feature_mapping(struct hid_device *hdev, data[0] = field->report->id; ret = wacom_get_report(hdev, HID_FEATURE_REPORT, data, n, WAC_CMD_RETRIES); - if (ret == n) { + if (ret == n && features->type == HID_GENERIC) { ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, data, n, 0); + } else if (ret == 2 && features->type != HID_GENERIC) { + features->touch_max = data[1]; } else { features->touch_max = 16; hid_warn(hdev, "wacom_feature_mapping: " diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 16eb9b3f1cb1b57cb244ed5b7f02f82b18d15a13..3bf1f9ef8ea258648d5b27474019f2004612c584 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -849,6 +849,9 @@ void vmbus_initiate_unload(bool crash) { struct vmbus_channel_message_header hdr; + if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED) + return; + /* Pre-Win2012R2 hosts don't support reconnect */ if (vmbus_proto_version < VERSION_WIN8_1) return; diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 9aa18f387a34679de8a7b399f0bfefb54178d772..fb22b72fd535a6c780bf1b84268752bd8cb63823 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -43,6 +43,7 @@ #include #include #include +#include #include "hyperv_vmbus.h" struct vmbus_dynid { @@ -58,14 +59,35 @@ static int hyperv_cpuhp_online; static void *hv_panic_page; +/* + * Boolean to control whether to report panic messages over Hyper-V. + * + * It can be set via /proc/sys/kernel/hyperv/record_panic_msg + */ +static int sysctl_record_panic_msg = 1; + +static int hyperv_report_reg(void) +{ + return !sysctl_record_panic_msg || !hv_panic_page; +} + static int hyperv_panic_event(struct notifier_block *nb, unsigned long val, void *args) { struct pt_regs *regs; - regs = current_pt_regs(); + vmbus_initiate_unload(true); - hyperv_report_panic(regs, val); + /* + * Hyper-V should be notified only once about a panic. If we will be + * doing hyperv_report_panic_msg() later with kmsg data, don't do + * the notification here. + */ + if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE + && hyperv_report_reg()) { + regs = current_pt_regs(); + hyperv_report_panic(regs, val, false); + } return NOTIFY_DONE; } @@ -75,7 +97,13 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val, struct die_args *die = (struct die_args *)args; struct pt_regs *regs = die->regs; - hyperv_report_panic(regs, val); + /* + * Hyper-V should be notified only once about a panic. If we will be + * doing hyperv_report_panic_msg() later with kmsg data, don't do + * the notification here. + */ + if (hyperv_report_reg()) + hyperv_report_panic(regs, val, true); return NOTIFY_DONE; } @@ -1088,13 +1116,6 @@ static void vmbus_isr(void) add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0); } -/* - * Boolean to control whether to report panic messages over Hyper-V. 
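On the hyperv_report_reg() helper introduced above: the name is terse, so here is the same predicate spelled out as a sketch, with an illustrative name rather than the patch's own:

/*
 * Sketch: CPU registers are reported to Hyper-V at panic/die time only when
 * the richer kmsg-based report cannot happen, i.e. the sysctl is off or the
 * dedicated panic page was never allocated.
 */
static bool should_report_registers(bool record_panic_msg, const void *panic_page)
{
	return !record_panic_msg || !panic_page;
}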
- * - * It can be set via /proc/sys/kernel/hyperv/record_panic_msg - */ -static int sysctl_record_panic_msg = 1; - /* * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg * buffer and call into Hyper-V to transfer the data. @@ -1219,19 +1240,29 @@ static int vmbus_bus_init(void) hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL); if (hv_panic_page) { ret = kmsg_dump_register(&hv_kmsg_dumper); - if (ret) + if (ret) { pr_err("Hyper-V: kmsg dump register " "error 0x%x\n", ret); + free_page( + (unsigned long)hv_panic_page); + hv_panic_page = NULL; + } } else pr_err("Hyper-V: panic message page memory " "allocation failed"); } register_die_notifier(&hyperv_die_block); - atomic_notifier_chain_register(&panic_notifier_list, - &hyperv_panic_block); } + /* + * Always register the panic notifier because we need to unload + * the VMbus channel connection to prevent any VMbus + * activity after the VM panics. + */ + atomic_notifier_chain_register(&panic_notifier_list, + &hyperv_panic_block); + vmbus_request_offers(); return 0; @@ -1243,7 +1274,6 @@ static int vmbus_bus_init(void) hv_remove_vmbus_irq(); bus_unregister(&hv_bus); - free_page((unsigned long)hv_panic_page); unregister_sysctl_table(hv_ctl_table_hdr); hv_ctl_table_hdr = NULL; return ret; @@ -1875,7 +1905,6 @@ static void hv_kexec_handler(void) { hv_synic_clockevents_cleanup(); vmbus_initiate_unload(false); - vmbus_connection.conn_state = DISCONNECTED; /* Make sure conn_state is set as hv_synic_cleanup checks for it */ mb(); cpuhp_remove_state(hyperv_cpuhp_online); @@ -1890,7 +1919,6 @@ static void hv_crash_handler(struct pt_regs *regs) * doing the cleanup for current CPU only. This should be sufficient * for kdump. */ - vmbus_connection.conn_state = DISCONNECTED; hv_synic_cleanup(smp_processor_id()); hyperv_cleanup(); }; diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index e5234f953a6d16213920df252ac8ef23d857f924..b6e5aaa54963d6b30e35f2db363cf376176f5483 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -527,7 +527,7 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id) } data->config = config; - hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, + hwmon_dev = devm_hwmon_device_register_with_info(dev, "jc42", data, &jc42_chip_info, NULL); return PTR_ERR_OR_ZERO(hwmon_dev); diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c index a1cdcfc74acf6f8d55d23e20c2adb4b92a388784..8915ee30a5b44ee3dc7ad7019d7dc0c818c1daf1 100644 --- a/drivers/i2c/busses/i2c-altera.c +++ b/drivers/i2c/busses/i2c-altera.c @@ -395,7 +395,6 @@ static int altr_i2c_probe(struct platform_device *pdev) struct altr_i2c_dev *idev = NULL; struct resource *res; int irq, ret; - u32 val; idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); if (!idev) @@ -422,17 +421,17 @@ static int altr_i2c_probe(struct platform_device *pdev) init_completion(&idev->msg_complete); spin_lock_init(&idev->lock); - val = device_property_read_u32(idev->dev, "fifo-size", + ret = device_property_read_u32(idev->dev, "fifo-size", &idev->fifo_size); - if (val) { + if (ret) { dev_err(&pdev->dev, "FIFO size set to default of %d\n", ALTR_I2C_DFLT_FIFO_SZ); idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ; } - val = device_property_read_u32(idev->dev, "clock-frequency", + ret = device_property_read_u32(idev->dev, "clock-frequency", &idev->bus_clk_rate); - if (val) { + if (ret) { dev_err(&pdev->dev, "Default to 100kHz\n"); idev->bus_clk_rate = 100000; /* default clock rate */ } diff --git 
a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c index 9e62f893958aa0016c14e74564b9025f8159c99e..81158ae8bfe361f3416f541651cfc0f2b3287762 100644 --- a/drivers/i2c/busses/i2c-st.c +++ b/drivers/i2c/busses/i2c-st.c @@ -437,6 +437,7 @@ static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev) /** * st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode * @i2c_dev: Controller's private data + * @max: Maximum amount of data to fill into the Tx FIFO * * This functions fills the Tx FIFO with fixed pattern when * in read mode to trigger clock. diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index d4bbe5b533189730437aa5a8a4c8593288fc10da..23a6e7baa396bb2b5384d1fd5ba8cffa6a1307b1 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c @@ -542,7 +542,7 @@ static const struct iio_info ad7797_info = { .read_raw = &ad7793_read_raw, .write_raw = &ad7793_write_raw, .write_raw_get_fmt = &ad7793_write_raw_get_fmt, - .attrs = &ad7793_attribute_group, + .attrs = &ad7797_attribute_group, .validate_trigger = ad_sd_validate_trigger, }; diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 0409dcf5b04796643806cb164010171d497dcd42..24d5d049567a49989ec42e2fa36ec218ad21da94 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -1308,8 +1308,30 @@ static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc) static void stm32_adc_dma_buffer_done(void *data) { struct iio_dev *indio_dev = data; + struct stm32_adc *adc = iio_priv(indio_dev); + int residue = stm32_adc_dma_residue(adc); + + /* + * In DMA mode the trigger services of IIO are not used + * (e.g. no call to iio_trigger_poll). + * Calling irq handler associated to the hardware trigger is not + * relevant as the conversions have already been done. Data + * transfers are performed directly in DMA callback instead. + * This implementation avoids to call trigger irq handler that + * may sleep, in an atomic context (DMA irq handler context). + */ + dev_dbg(&indio_dev->dev, "%s bufi=%d\n", __func__, adc->bufi); - iio_trigger_poll_chained(indio_dev->trig); + while (residue >= indio_dev->scan_bytes) { + u16 *buffer = (u16 *)&adc->rx_buf[adc->bufi]; + + iio_push_to_buffers(indio_dev, buffer); + + residue -= indio_dev->scan_bytes; + adc->bufi += indio_dev->scan_bytes; + if (adc->bufi >= adc->rx_buf_sz) + adc->bufi = 0; + } } static int stm32_adc_dma_start(struct iio_dev *indio_dev) @@ -1703,6 +1725,7 @@ static int stm32_adc_probe(struct platform_device *pdev) { struct iio_dev *indio_dev; struct device *dev = &pdev->dev; + irqreturn_t (*handler)(int irq, void *p) = NULL; struct stm32_adc *adc; int ret; @@ -1785,9 +1808,11 @@ static int stm32_adc_probe(struct platform_device *pdev) if (ret < 0) goto err_clk_disable; + if (!adc->dma_chan) + handler = &stm32_adc_trigger_handler; + ret = iio_triggered_buffer_setup(indio_dev, - &iio_pollfunc_store_time, - &stm32_adc_trigger_handler, + &iio_pollfunc_store_time, handler, &stm32_adc_buffer_setup_ops); if (ret) { dev_err(&pdev->dev, "buffer setup failed\n"); diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index 1ae86e7359f73dd8b40fd41d9abffe6ae8e49846..1b0046cc717b70b336275aeecc89bbee003d6f1f 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -103,6 +103,16 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500; #define XADC_FLAGS_BUFFERED BIT(0) +/* + * The XADC hardware supports a samplerate of up to 1MSPS. 
Unfortunately it does + * not have a hardware FIFO. Which means an interrupt is generated for each + * conversion sequence. At 1MSPS sample rate the CPU in ZYNQ7000 is completely + * overloaded by the interrupts that it soft-lockups. For this reason the driver + * limits the maximum samplerate 150kSPS. At this rate the CPU is fairly busy, + * but still responsive. + */ +#define XADC_MAX_SAMPLERATE 150000 + static void xadc_write_reg(struct xadc *xadc, unsigned int reg, uint32_t val) { @@ -675,7 +685,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state) spin_lock_irqsave(&xadc->lock, flags); xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val); - xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS); + xadc_write_reg(xadc, XADC_AXI_REG_IPISR, XADC_AXI_INT_EOS); if (state) val |= XADC_AXI_INT_EOS; else @@ -723,13 +733,14 @@ static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode) { uint16_t val; + /* Powerdown the ADC-B when it is not needed. */ switch (seq_mode) { case XADC_CONF1_SEQ_SIMULTANEOUS: case XADC_CONF1_SEQ_INDEPENDENT: - val = XADC_CONF2_PD_ADC_B; + val = 0; break; default: - val = 0; + val = XADC_CONF2_PD_ADC_B; break; } @@ -798,6 +809,16 @@ static int xadc_preenable(struct iio_dev *indio_dev) if (ret) goto err; + /* + * In simultaneous mode the upper and lower aux channels are samples at + * the same time. In this mode the upper 8 bits in the sequencer + * register are don't care and the lower 8 bits control two channels + * each. As such we must set the bit if either the channel in the lower + * group or the upper group is enabled. + */ + if (seq_mode == XADC_CONF1_SEQ_SIMULTANEOUS) + scan_mask = ((scan_mask >> 8) | scan_mask) & 0xff0000; + ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16); if (ret) goto err; @@ -824,11 +845,27 @@ static const struct iio_buffer_setup_ops xadc_buffer_ops = { .postdisable = &xadc_postdisable, }; +static int xadc_read_samplerate(struct xadc *xadc) +{ + unsigned int div; + uint16_t val16; + int ret; + + ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16); + if (ret) + return ret; + + div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET; + if (div < 2) + div = 2; + + return xadc_get_dclk_rate(xadc) / div / 26; +} + static int xadc_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long info) { struct xadc *xadc = iio_priv(indio_dev); - unsigned int div; uint16_t val16; int ret; @@ -881,41 +918,31 @@ static int xadc_read_raw(struct iio_dev *indio_dev, *val = -((273150 << 12) / 503975); return IIO_VAL_INT; case IIO_CHAN_INFO_SAMP_FREQ: - ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16); - if (ret) + ret = xadc_read_samplerate(xadc); + if (ret < 0) return ret; - div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET; - if (div < 2) - div = 2; - - *val = xadc_get_dclk_rate(xadc) / div / 26; - + *val = ret; return IIO_VAL_INT; default: return -EINVAL; } } -static int xadc_write_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, int val, int val2, long info) +static int xadc_write_samplerate(struct xadc *xadc, int val) { - struct xadc *xadc = iio_priv(indio_dev); unsigned long clk_rate = xadc_get_dclk_rate(xadc); unsigned int div; if (!clk_rate) return -EINVAL; - if (info != IIO_CHAN_INFO_SAMP_FREQ) - return -EINVAL; - if (val <= 0) return -EINVAL; /* Max. 
150 kSPS */ - if (val > 150000) - val = 150000; + if (val > XADC_MAX_SAMPLERATE) + val = XADC_MAX_SAMPLERATE; val *= 26; @@ -928,7 +955,7 @@ static int xadc_write_raw(struct iio_dev *indio_dev, * limit. */ div = clk_rate / val; - if (clk_rate / div / 26 > 150000) + if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE) div++; if (div < 2) div = 2; @@ -939,6 +966,17 @@ static int xadc_write_raw(struct iio_dev *indio_dev, div << XADC_CONF2_DIV_OFFSET); } +static int xadc_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int val, int val2, long info) +{ + struct xadc *xadc = iio_priv(indio_dev); + + if (info != IIO_CHAN_INFO_SAMP_FREQ) + return -EINVAL; + + return xadc_write_samplerate(xadc, val); +} + static const struct iio_event_spec xadc_temp_events[] = { { .type = IIO_EV_TYPE_THRESH, @@ -1226,6 +1264,21 @@ static int xadc_probe(struct platform_device *pdev) if (ret) goto err_free_samplerate_trigger; + /* + * Make sure not to exceed the maximum samplerate since otherwise the + * resulting interrupt storm will soft-lock the system. + */ + if (xadc->ops->flags & XADC_FLAGS_BUFFERED) { + ret = xadc_read_samplerate(xadc); + if (ret < 0) + goto err_free_samplerate_trigger; + if (ret > XADC_MAX_SAMPLERATE) { + ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE); + if (ret < 0) + goto err_free_samplerate_trigger; + } + } + ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0, dev_name(&pdev->dev), indio_dev); if (ret) diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 26fbd1bd941352d31cb42e0f240f88f20c12fab5..09279e40c55ca8820f6695a20839469ba6cf1345 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -93,7 +93,7 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr) struct st_sensor_odr_avl odr_out = {0, 0}; struct st_sensor_data *sdata = iio_priv(indio_dev); - if (!sdata->sensor_settings->odr.addr) + if (!sdata->sensor_settings->odr.mask) return 0; err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out); diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c index 015a21f0c2ef412de69b0d180f6982e183840d1d..9174ab9288808af9e2bfc231e4d7cd1e0329606a 100644 --- a/drivers/iio/light/si1133.c +++ b/drivers/iio/light/si1133.c @@ -102,6 +102,9 @@ #define SI1133_INPUT_FRACTION_LOW 15 #define SI1133_LUX_OUTPUT_FRACTION 12 #define SI1133_LUX_BUFFER_SIZE 9 +#define SI1133_MEASURE_BUFFER_SIZE 3 + +#define SI1133_SIGN_BIT_INDEX 23 static const int si1133_scale_available[] = { 1, 2, 4, 8, 16, 32, 64, 128}; @@ -234,13 +237,13 @@ static const struct si1133_lux_coeff lux_coeff = { } }; -static int si1133_calculate_polynomial_inner(u32 input, u8 fraction, u16 mag, +static int si1133_calculate_polynomial_inner(s32 input, u8 fraction, u16 mag, s8 shift) { return ((input << fraction) / mag) << shift; } -static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order, +static int si1133_calculate_output(s32 x, s32 y, u8 x_order, u8 y_order, u8 input_fraction, s8 sign, const struct si1133_coeff *coeffs) { @@ -276,7 +279,7 @@ static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order, * The algorithm is from: * https://siliconlabs.github.io/Gecko_SDK_Doc/efm32zg/html/si1133_8c_source.html#l00716 */ -static int si1133_calc_polynomial(u32 x, u32 y, u8 input_fraction, u8 num_coeff, +static int si1133_calc_polynomial(s32 x, s32 y, u8 input_fraction, u8 num_coeff, const struct si1133_coeff *coeffs) { u8 
x_order, y_order; @@ -614,7 +617,7 @@ static int si1133_measure(struct si1133_data *data, { int err; - __be16 resp; + u8 buffer[SI1133_MEASURE_BUFFER_SIZE]; err = si1133_set_adcmux(data, 0, chan->channel); if (err) @@ -625,12 +628,13 @@ static int si1133_measure(struct si1133_data *data, if (err) return err; - err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(resp), - (u8 *)&resp); + err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(buffer), + buffer); if (err) return err; - *val = be16_to_cpu(resp); + *val = sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2], + SI1133_SIGN_BIT_INDEX); return err; } @@ -704,9 +708,9 @@ static int si1133_get_lux(struct si1133_data *data, int *val) { int err; int lux; - u32 high_vis; - u32 low_vis; - u32 ir; + s32 high_vis; + s32 low_vis; + s32 ir; u8 buffer[SI1133_LUX_BUFFER_SIZE]; /* Activate lux channels */ @@ -719,9 +723,16 @@ static int si1133_get_lux(struct si1133_data *data, int *val) if (err) return err; - high_vis = (buffer[0] << 16) | (buffer[1] << 8) | buffer[2]; - low_vis = (buffer[3] << 16) | (buffer[4] << 8) | buffer[5]; - ir = (buffer[6] << 16) | (buffer[7] << 8) | buffer[8]; + high_vis = + sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2], + SI1133_SIGN_BIT_INDEX); + + low_vis = + sign_extend32((buffer[3] << 16) | (buffer[4] << 8) | buffer[5], + SI1133_SIGN_BIT_INDEX); + + ir = sign_extend32((buffer[6] << 16) | (buffer[7] << 8) | buffer[8], + SI1133_SIGN_BIT_INDEX); if (high_vis > SI1133_ADC_THRESHOLD || ir > SI1133_ADC_THRESHOLD) lux = si1133_calc_polynomial(high_vis, ir, diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 6e96a2fb97dc442eb7093db52644f91fddbe8456..df8f5ceea2dd4334089d521425f56bc05b089eef 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -408,16 +408,15 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, struct flowi6 fl6; struct dst_entry *dst; struct rt6_info *rt; - int ret; memset(&fl6, 0, sizeof fl6); fl6.daddr = dst_in->sin6_addr; fl6.saddr = src_in->sin6_addr; fl6.flowi6_oif = addr->bound_dev_if; - ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6); - if (ret < 0) - return ret; + dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL); + if (IS_ERR(dst)) + return PTR_ERR(dst); rt = (struct rt6_info *)dst; if (ipv6_addr_any(&src_in->sin6_addr)) { diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index e16872e0724ff130e9902382fad27997ad75540e..5c03f4701ece23e147c3ad332066e12e44823880 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2753,6 +2753,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) err2: kfree(route->path_rec); route->path_rec = NULL; + route->num_paths = 0; err1: kfree(work); return ret; @@ -4635,6 +4636,19 @@ static int __init cma_init(void) { int ret; + /* + * There is a rare lock ordering dependency in cma_netdev_callback() + * that only happens when bonding is enabled. Teach lockdep that rtnl + * must never be nested under lock so it can find these without having + * to test with bonding. 
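The cma_init() hunk that follows this comment takes rtnl and then the cma lock once, purely so lockdep learns the required ordering without a bonding setup. The same trick works for any lock pair; a minimal sketch with made-up locks, not taken from this series:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mutex.h>

/*
 * Sketch: acquire the locks once, in the only legal order, so lockdep
 * records "inner nests under outer" up front and can later flag any path
 * that nests them the other way round, even if that path is never hit in
 * testing.
 */
static DEFINE_MUTEX(outer_lock);
static DEFINE_MUTEX(inner_lock);

static int __init prime_lock_ordering(void)
{
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		mutex_lock(&outer_lock);
		mutex_lock(&inner_lock);
		mutex_unlock(&inner_lock);
		mutex_unlock(&outer_lock);
	}
	return 0;
}
early_initcall(prime_lock_ordering);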
+ */ + if (IS_ENABLED(CONFIG_LOCKDEP)) { + rtnl_lock(); + mutex_lock(&lock); + mutex_unlock(&lock); + rtnl_unlock(); + } + cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM); if (!cma_wq) return -ENOMEM; diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index c4118bcd5103565e3b20b6a970e67322061d195f..bf937fec50dcad1856a594ae2eedf3c4515f1bfa 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -381,7 +381,7 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj, * and the caller is expected to ensure that uverbs_close_fd is never * done while a call top lookup is possible. */ - if (f->f_op != fd_type->fops) { + if (f->f_op != fd_type->fops || uobject->ufile != ufile) { fput(f); return ERR_PTR(-EBADF); } @@ -697,7 +697,6 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj, enum rdma_lookup_mode mode) { assert_uverbs_usecnt(uobj, mode); - uobj->uapi_object->type_class->lookup_put(uobj, mode); /* * In order to unlock an object, either decrease its usecnt for * read access or zero it in case of exclusive access. See @@ -714,6 +713,7 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj, break; } + uobj->uapi_object->type_class->lookup_put(uobj, mode); /* Pairs with the kref obtained by type->lookup_get */ uverbs_uobject_put(uobj); } diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 01d68ed46c1b6c530a717a7efd8866dd62dc6506..2acc30c3d5b2db00b3df01e92a0c32181f18ee1a 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -89,6 +89,7 @@ struct ucma_context { struct ucma_file *file; struct rdma_cm_id *cm_id; + struct mutex mutex; u64 uid; struct list_head list; @@ -215,6 +216,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) init_completion(&ctx->comp); INIT_LIST_HEAD(&ctx->mc_list); ctx->file = file; + mutex_init(&ctx->mutex); mutex_lock(&mut); ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL); @@ -596,6 +598,7 @@ static int ucma_free_ctx(struct ucma_context *ctx) } events_reported = ctx->events_reported; + mutex_destroy(&ctx->mutex); kfree(ctx); return events_reported; } @@ -665,7 +668,10 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); + mutex_unlock(&ctx->mutex); + ucma_put_ctx(ctx); return ret; } @@ -688,7 +694,9 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -712,8 +720,10 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -738,8 +748,10 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -759,7 +771,9 @@ static ssize_t ucma_resolve_route(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + 
mutex_lock(&ctx->mutex); ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -848,6 +862,7 @@ static ssize_t ucma_query_route(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); memset(&resp, 0, sizeof resp); addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? @@ -871,6 +886,7 @@ static ssize_t ucma_query_route(struct ucma_file *file, ucma_copy_iw_route(&resp, &ctx->cm_id->route); out: + mutex_unlock(&ctx->mutex); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; @@ -1022,6 +1038,7 @@ static ssize_t ucma_query(struct ucma_file *file, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); switch (cmd.option) { case RDMA_USER_CM_QUERY_ADDR: ret = ucma_query_addr(ctx, response, out_len); @@ -1036,6 +1053,7 @@ static ssize_t ucma_query(struct ucma_file *file, ret = -ENOSYS; break; } + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; @@ -1076,7 +1094,9 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, return PTR_ERR(ctx); ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); + mutex_lock(&ctx->mutex); ret = rdma_connect(ctx->cm_id, &conn_param); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1097,7 +1117,9 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf, ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ? cmd.backlog : max_backlog; + mutex_lock(&ctx->mutex); ret = rdma_listen(ctx->cm_id, ctx->backlog); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1120,13 +1142,17 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, if (cmd.conn_param.valid) { ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); mutex_lock(&file->mut); + mutex_lock(&ctx->mutex); ret = __rdma_accept(ctx->cm_id, &conn_param, NULL); + mutex_unlock(&ctx->mutex); if (!ret) ctx->uid = cmd.uid; mutex_unlock(&file->mut); - } else + } else { + mutex_lock(&ctx->mutex); ret = __rdma_accept(ctx->cm_id, NULL, NULL); - + mutex_unlock(&ctx->mutex); + } ucma_put_ctx(ctx); return ret; } @@ -1145,7 +1171,9 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1164,7 +1192,9 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); ret = rdma_disconnect(ctx->cm_id); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } @@ -1195,7 +1225,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; + mutex_lock(&ctx->mutex); ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); + mutex_unlock(&ctx->mutex); if (ret) goto out; @@ -1274,9 +1306,13 @@ static int ucma_set_ib_path(struct ucma_context *ctx, struct sa_path_rec opa; sa_convert_path_ib_to_opa(&opa, &sa_path); + mutex_lock(&ctx->mutex); ret = rdma_set_ib_path(ctx->cm_id, &opa); + mutex_unlock(&ctx->mutex); } else { + mutex_lock(&ctx->mutex); ret = rdma_set_ib_path(ctx->cm_id, &sa_path); + mutex_unlock(&ctx->mutex); } if (ret) return ret; @@ -1309,7 +1345,9 @@ static int 
ucma_set_option_level(struct ucma_context *ctx, int level, switch (level) { case RDMA_OPTION_ID: + mutex_lock(&ctx->mutex); ret = ucma_set_option_id(ctx, optname, optval, optlen); + mutex_unlock(&ctx->mutex); break; case RDMA_OPTION_IB: ret = ucma_set_option_ib(ctx, optname, optval, optlen); @@ -1369,8 +1407,10 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + mutex_lock(&ctx->mutex); if (ctx->cm_id->device) ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event); + mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; @@ -1413,8 +1453,10 @@ static ssize_t ucma_process_join(struct ucma_file *file, mc->join_state = join_state; mc->uid = cmd->uid; memcpy(&mc->addr, addr, cmd->addr_size); + mutex_lock(&ctx->mutex); ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, join_state, mc); + mutex_unlock(&ctx->mutex); if (ret) goto err2; @@ -1518,7 +1560,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file, goto out; } + mutex_lock(&mc->ctx->mutex); rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); + mutex_unlock(&mc->ctx->mutex); + mutex_lock(&mc->ctx->file->mut); ucma_cleanup_mc_events(mc); list_del(&mc->list); diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index 25e867393463e5cc634d9a304e85dba8f5ddb89d..e3e8d65646e34cd80adcd0f1a56d4f053990e888 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c @@ -670,7 +670,11 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, dd_dev_err(dd, "Skipping sc2vl sysfs info, (err %d) port %u\n", ret, port_num); - goto bail; + /* + * Based on the documentation for kobject_init_and_add(), the + * caller should call kobject_put even if this call fails. + */ + goto bail_sc2vl; } kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD); @@ -680,7 +684,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, dd_dev_err(dd, "Skipping sl2sc sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_sc2vl; + goto bail_sl2sc; } kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD); @@ -690,7 +694,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, dd_dev_err(dd, "Skipping vl2mtu sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_sl2sc; + goto bail_vl2mtu; } kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD); @@ -700,7 +704,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, dd_dev_err(dd, "Skipping Congestion Control sysfs info, (err %d) port %u\n", ret, port_num); - goto bail_vl2mtu; + goto bail_cc; } kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); @@ -738,7 +742,6 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, kobject_put(&ppd->sl2sc_kobj); bail_sc2vl: kobject_put(&ppd->sc2vl_kobj); -bail: return ret; } @@ -858,8 +861,13 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd) for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i) device_remove_file(&dev->dev, hfi1_attributes[i]); - for (i = 0; i < dd->num_sdma; i++) - kobject_del(&dd->per_sdma[i].kobj); + /* + * The function kobject_put() will call kobject_del() if the kobject + * has been added successfully. The sysfs files created under the + * kobject directory will also be removed during the process. 
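+ *
+ * A minimal sketch of the rule being relied on here, for illustration
+ * only (kobj, ktype, parent and "name" are placeholders, not code from
+ * this driver):
+ *
+ *	ret = kobject_init_and_add(&kobj, &ktype, parent, "name");
+ *	if (ret)
+ *		kobject_put(&kobj);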
+ */ + for (; i >= 0; i--) + kobject_put(&dd->per_sdma[i].kobj); return ret; } @@ -872,6 +880,10 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd) struct hfi1_pportdata *ppd; int i; + /* Unwind operations in hfi1_verbs_register_sysfs() */ + for (i = 0; i < dd->num_sdma; i++) + kobject_put(&dd->per_sdma[i].kobj); + for (i = 0; i < dd->num_pports; i++) { ppd = &dd->pport[i]; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index a19d3ad14dc374309ee1d1025ff5140aa45d0880..eac4ade4561193c28ada515cf8821300a2a604de 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1606,8 +1606,9 @@ static int __mlx4_ib_create_default_rules( int i; for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) { + union ib_flow_spec ib_spec = {}; int ret; - union ib_flow_spec ib_spec; + switch (pdefault_rules->rules_create_list[i]) { case 0: /* no rule */ diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 2db34f7b5ced1b781569565ea50498309b2e72c5..f41f3ff689c55e22e3963b7252558a47c0540813 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1070,12 +1070,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_GRE; - if (MLX5_CAP_GEN(mdev, flex_parser_protocols) & - MLX5_FLEX_PROTO_CW_MPLS_GRE) + if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE; - if (MLX5_CAP_GEN(mdev, flex_parser_protocols) & - MLX5_FLEX_PROTO_CW_MPLS_UDP) + if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp)) resp.tunnel_offloads_caps |= MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP; } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 4fc9278d0ddec820b502c5233427344238bcdbe9..10f6ae4f8f3ff43b3870ed090dff47be00b97185 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -4887,7 +4887,9 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev, rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f); rdma_ah_set_static_rate(ah_attr, path->static_rate ? 
path->static_rate - 5 : 0); - if (path->grh_mlid & (1 << 7)) { + + if (path->grh_mlid & (1 << 7) || + ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { u32 tc_fl = be32_to_cpu(path->tclass_flowlabel); rdma_ah_set_grh(ah_attr, NULL, diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 54add70c22b5c370bf7c8648b30cc61df9b26644..7903bd5c639eaa4ec076d6b37a31cc985fd32f27 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -154,10 +154,12 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev, memcpy(&fl6.daddr, daddr, sizeof(*daddr)); fl6.flowi6_proto = IPPROTO_UDP; - if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk), - recv_sockets.sk6->sk, &ndst, &fl6))) { + ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk), + recv_sockets.sk6->sk, &fl6, + NULL); + if (unlikely(IS_ERR(ndst))) { pr_err_ratelimited("no route to %pI6\n", daddr); - goto put; + return NULL; } if (unlikely(ndst->error)) { diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 136f6e7bf797767256e66c1c083cb80c55cd7a1b..0d0f977a2f393dcabc09e01c5178e2f2117354ca 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -534,6 +534,17 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"), }, }, + { + /* + * Acer Aspire 5738z + * Touchpad stops working in mux mode when disabled and re-enabled + * with the touchpad enable/disable toggle hotkey + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), + }, + }, { } }; diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index c60395b7470f640cc229c53bbd825235d61194c1..c0e50c5c317b401c997f6acf8f50ee24da7c4fee 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -58,6 +58,18 @@ config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST If unsure, say N here. +config IOMMU_IO_PGTABLE_FAST + bool "Fast ARMv7/v8 Long Descriptor Format" + depends on ARM64_DMA_USE_IOMMU && IOMMU_DMA + help + Enable support for a subset of the ARM long descriptor pagetable + format. This allocator achieves fast performance by + pre-allocating and pre-populating page table memory up front. + It only supports a 32-bit virtual address space. + + This implementation is mainly optimized for use cases where the + buffers are small (<= 64K) since it only supports 4K page sizes. + endmenu config IOMMU_DEBUGFS @@ -332,11 +344,12 @@ config SPAPR_TCE_IOMMU # ARM IOMMU support config ARM_SMMU - bool "ARM Ltd. System MMU (SMMU) Support" + tristate "ARM Ltd. System MMU (SMMU) Support" depends on (ARM64 || ARM) && MMU select IOMMU_API select IOMMU_IO_PGTABLE_LPAE select ARM_DMA_USE_IOMMU if ARM + select ARM64_DMA_USE_IOMMU if ARM64 help Support for implementations of the ARM System MMU architecture versions 1 and 2.
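
Note on the rxe_find_route6() hunk above: ipv6_dst_lookup_flow() returns the
dst_entry directly (or an ERR_PTR()) rather than filling in a pointer and
returning an int, so callers now test the result with IS_ERR(). A minimal
sketch of the new calling convention, where net, sk and fl6 stand in for
whatever the caller already has:

	struct dst_entry *ndst;

	ndst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, &fl6, NULL);
	if (IS_ERR(ndst))
		return NULL;	/* no route */
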
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index e13ea199f5896ec4e7b994e6d18fec0802de45ed..11d6de2ddc0241fcf7752ac3bb52d7383dc40da8 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o obj-$(CONFIG_IOMMU_IOVA) += iova.o +obj-$(CONFIG_IOMMU_IO_PGTABLE_FAST) += io-pgtable-fast.o dma-mapping-fast.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 465f28a7844c227f4f6f28243ab7ef5d66d02fa1..2557ed112bc24f892c1bd0708516648bbe34005d 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -2836,7 +2836,7 @@ static int __init parse_amd_iommu_intr(char *str) { for (; *str; ++str) { if (strncmp(str, "legacy", 6) == 0) { - amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; + amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; break; } if (strncmp(str, "vapic", 5) == 0) { diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 69f3d4c95b530788edeea18b904277a58721861f..859b06424e5c410ba355c9f50287cc6e394c1103 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -352,7 +352,7 @@ #define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL) #define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL) -#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL) +#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL) #define DTE_GCR3_INDEX_A 0 #define DTE_GCR3_INDEX_B 1 diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h index a1226e4ab5f89716ec50d8b8f64594ed3c36453f..3921487dde808dcd00bfebf853303134e38ea42c 100644 --- a/drivers/iommu/arm-smmu-regs.h +++ b/drivers/iommu/arm-smmu-regs.h @@ -37,6 +37,9 @@ #define sCR0_VMID16EN (1 << 31) #define sCR0_BSU_SHIFT 14 #define sCR0_BSU_MASK 0x3 +#define sCR0_SHCFG_SHIFT 22 +#define sCR0_SHCFG_MASK 0x3 +#define sCR0_SHCFG_NSH 3 /* Auxiliary Configuration register */ #define ARM_SMMU_GR0_sACR 0x10 @@ -105,6 +108,8 @@ #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) #define SMR_VALID (1 << 31) #define SMR_MASK_SHIFT 16 +#define SMR_MASK_MASK 0x7FFF +#define SID_MASK 0x7FFF #define SMR_ID_SHIFT 0 #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) @@ -113,6 +118,9 @@ #define S2CR_EXIDVALID (1 << 10) #define S2CR_TYPE_SHIFT 16 #define S2CR_TYPE_MASK 0x3 +#define S2CR_SHCFG_SHIFT 8 +#define S2CR_SHCFG_MASK 0x3 +#define S2CR_SHCFG_NSH 0x3 enum arm_smmu_s2cr_type { S2CR_TYPE_TRANS, S2CR_TYPE_BYPASS, @@ -147,6 +155,9 @@ enum arm_smmu_s2cr_privcfg { #define CBAR_IRPTNDX_SHIFT 24 #define CBAR_IRPTNDX_MASK 0xff +#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2)) +#define CBFRSYNRA_SID_MASK (0xffff) + #define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2)) #define CBA2R_RW64_32BIT (0 << 0) #define CBA2R_RW64_64BIT (1 << 0) @@ -165,20 +176,51 @@ enum arm_smmu_s2cr_privcfg { #define ARM_SMMU_CB_S1_MAIR1 0x3c #define ARM_SMMU_CB_PAR 0x50 #define ARM_SMMU_CB_FSR 0x58 +#define ARM_SMMU_CB_FSRRESTORE 0x5c #define ARM_SMMU_CB_FAR 0x60 #define ARM_SMMU_CB_FSYNR0 0x68 +#define ARM_SMMU_CB_FSYNR1 0x6c #define ARM_SMMU_CB_S1_TLBIVA 0x600 #define ARM_SMMU_CB_S1_TLBIASID 0x610 +#define ARM_SMMU_CB_S1_TLBIALL 0x618 #define ARM_SMMU_CB_S1_TLBIVAL 0x620 #define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 #define ARM_SMMU_CB_S2_TLBIIPAS2L 
0x638 #define ARM_SMMU_CB_TLBSYNC 0x7f0 #define ARM_SMMU_CB_TLBSTATUS 0x7f4 +#define TLBSTATUS_SACTIVE (1 << 0) #define ARM_SMMU_CB_ATS1PR 0x800 #define ARM_SMMU_CB_ATSR 0x8f0 +#define ARM_SMMU_STATS_SYNC_INV_TBU_ACK 0x25dc +#define TBU_SYNC_ACK_MASK 0x1ff +#define TBU_SYNC_ACK_SHIFT 17 +#define TBU_SYNC_REQ_MASK 0x1 +#define TBU_SYNC_REQ_SHIFT 16 +#define TBU_INV_ACK_MASK 0x1ff +#define TBU_INV_ACK_SHIFT 1 +#define TBU_INV_REQ_MASK 0x1 +#define TBU_INV_REQ_SHIFT 0 +#define ARM_SMMU_TBU_PWR_STATUS 0x2204 +#define ARM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR 0x2670 +#define TCU_SYNC_IN_PRGSS_MASK 0x1 +#define TCU_SYNC_IN_PRGSS_SHIFT 20 +#define TCU_INV_IN_PRGSS_MASK 0x1 +#define TCU_INV_IN_PRGSS_SHIFT 16 +#define TBUID_SHIFT 10 +#define SCTLR_MEM_ATTR_SHIFT 16 +#define SCTLR_SHCFG_SHIFT 22 +#define SCTLR_RACFG_SHIFT 24 +#define SCTLR_WACFG_SHIFT 26 +#define SCTLR_SHCFG_MASK 0x3 +#define SCTLR_SHCFG_NSH 0x3 +#define SCTLR_RACFG_RA 0x2 +#define SCTLR_WACFG_WA 0x2 +#define SCTLR_MEM_ATTR_OISH_WB_CACHE 0xf +#define SCTLR_MTCFG (1 << 20) #define SCTLR_S1_ASIDPNE (1 << 12) #define SCTLR_CFCFG (1 << 7) +#define SCTLR_HUPCF (1 << 8) #define SCTLR_CFIE (1 << 6) #define SCTLR_CFRE (1 << 5) #define SCTLR_E (1 << 4) @@ -217,4 +259,18 @@ enum arm_smmu_s2cr_privcfg { #define FSYNR0_WNR (1 << 4) +#define IMPL_DEF1_MICRO_MMU_CTRL 0 +#define MICRO_MMU_CTRL_LOCAL_HALT_REQ (1 << 2) +#define MICRO_MMU_CTRL_IDLE (1 << 3) + +/* Definitions for implementation-defined registers */ +#define ACTLR_QCOM_OSH_SHIFT 28 +#define ACTLR_QCOM_OSH 1 + +#define ACTLR_QCOM_ISH_SHIFT 29 +#define ACTLR_QCOM_ISH 1 + +#define ACTLR_QCOM_NSH_SHIFT 30 +#define ACTLR_QCOM_NSH 1 + #endif /* _ARM_SMMU_REGS_H */ diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index d662895690162823476ddbeb16e747bbe88e22d4..299cf66b632a1b0f466e67c211a8c8e7bd8477c1 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -2188,7 +2188,9 @@ static int arm_smmu_legacy_bus_init(void) arm_smmu_bus_init(); return 0; } +#ifndef MODULE device_initcall_sync(arm_smmu_legacy_bus_init); +#endif static int arm_smmu_device_remove(struct platform_device *pdev) { @@ -2230,7 +2232,26 @@ static struct platform_driver arm_smmu_driver = { .remove = arm_smmu_device_remove, .shutdown = arm_smmu_device_shutdown, }; -module_platform_driver(arm_smmu_driver); + +static int __init arm_smmu_driver_init(void) +{ + int ret; + + ret = platform_driver_register(&arm_smmu_driver); +#ifdef MODULE + if (!ret) + arm_smmu_legacy_bus_init(); +#endif + return ret; +} + +static void __exit arm_smmu_driver_exit(void) +{ + platform_driver_unregister(&arm_smmu_driver); +} + +subsys_initcall(arm_smmu_driver_init); +module_exit(arm_smmu_driver_exit); MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); MODULE_AUTHOR("Will Deacon "); diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 31ba8f92584a338b211c767ca9cdf310a5364c6f..a3659e67346d6c8c24341c20737870eb2f1353e1 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -307,6 +307,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, return 0; } + iovad->end_pfn = end_pfn; init_iova_domain(iovad, 1UL << order, base_pfn); if (!dev) return 0; @@ -315,6 +316,50 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, } EXPORT_SYMBOL(iommu_dma_init_domain); +/* + * Should be called prior to using dma-apis + */ +int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base, + u64 size) +{ + struct iommu_domain *domain; + struct 
iova_domain *iovad; + unsigned long pfn_lo, pfn_hi; + + domain = iommu_get_domain_for_dev(dev); + if (!domain || !domain->iova_cookie) + return -EINVAL; + + iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad; + + /* iova will be freed automatically by put_iova_domain() */ + pfn_lo = iova_pfn(iovad, base); + pfn_hi = iova_pfn(iovad, base + size - 1); + if (!reserve_iova(iovad, pfn_lo, pfn_hi)) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(iommu_dma_reserve_iova); + +/* + * Should be called prior to using dma-apis. + */ +int iommu_dma_enable_best_fit_algo(struct device *dev) +{ + struct iommu_domain *domain; + struct iova_domain *iovad; + + domain = iommu_get_domain_for_dev(dev); + if (!domain || !domain->iova_cookie) + return -EINVAL; + + iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad; + iovad->best_fit = true; + return 0; +} +EXPORT_SYMBOL_GPL(iommu_dma_enable_best_fit_algo); + /** * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API * page flags. @@ -332,6 +377,15 @@ int dma_info_to_prot(enum dma_data_direction dir, bool coherent, if (attrs & DMA_ATTR_PRIVILEGED) prot |= IOMMU_PRIV; + if (!(attrs & DMA_ATTR_EXEC_MAPPING)) + prot |= IOMMU_NOEXEC; + + if (attrs & DMA_ATTR_IOMMU_USE_UPSTREAM_HINT) + prot |= IOMMU_USE_UPSTREAM_HINT; + + if (attrs & DMA_ATTR_IOMMU_USE_LLC_NWA) + prot |= IOMMU_USE_LLC_NWA; + switch (dir) { case DMA_BIDIRECTIONAL: return prot | IOMMU_READ | IOMMU_WRITE; @@ -350,6 +404,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad; unsigned long shift, iova_len, iova = 0; + dma_addr_t limit; if (cookie->type == IOMMU_DMA_MSI_COOKIE) { cookie->msi_iova += size; @@ -373,16 +428,27 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, if (domain->geometry.force_aperture) dma_limit = min(dma_limit, domain->geometry.aperture_end); + /* + * Ensure iova is within range specified in iommu_dma_init_domain(). + * This also prevents unnecessary work iterating through the entire + * rb_tree. + */ + limit = min_t(dma_addr_t, DMA_BIT_MASK(32) >> shift, + iovad->end_pfn); + /* Try to get PCI devices a SAC address */ if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev)) - iova = alloc_iova_fast(iovad, iova_len, - DMA_BIT_MASK(32) >> shift, false); + iova = alloc_iova_fast(iovad, iova_len, limit, false); - if (!iova) - iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, - true); + if (!iova) { + limit = min_t(dma_addr_t, dma_limit >> shift, + iovad->end_pfn); + + iova = alloc_iova_fast(iovad, iova_len, limit, true); + } return (dma_addr_t)iova << shift; + } static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie, @@ -453,8 +519,9 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, unsigned int order = __fls(order_mask); order_size = 1U << order; - page = alloc_pages((order_mask - order_size) ? - gfp | __GFP_NORETRY : gfp, order); + page = alloc_pages(order ? + (gfp | __GFP_NORETRY) & + ~__GFP_RECLAIM : gfp, order); if (!page) continue; if (!order) @@ -648,7 +715,7 @@ void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, * avoid individually crossing any boundaries, so we merely need to check a * segment's start address to avoid concatenating across one. 
*/ -static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents, +int iommu_dma_finalise_sg(struct device *dev, struct scatterlist *sg, int nents, dma_addr_t dma_addr) { struct scatterlist *s, *cur = sg; @@ -701,7 +768,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents, * If mapping failed, then just restore the original list, * but making sure the DMA fields are invalidated. */ -static void __invalidate_sg(struct scatterlist *sg, int nents) +void iommu_dma_invalidate_sg(struct scatterlist *sg, int nents) { struct scatterlist *s; int i; @@ -723,14 +790,10 @@ static void __invalidate_sg(struct scatterlist *sg, int nents) * impedance-matching, to be able to hand off a suitably-aligned list, * but still preserve the original offsets and sizes for the caller. */ -int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, int prot) +size_t iommu_dma_prepare_map_sg(struct device *dev, struct iova_domain *iovad, + struct scatterlist *sg, int nents) { - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iommu_dma_cookie *cookie = domain->iova_cookie; - struct iova_domain *iovad = &cookie->iovad; struct scatterlist *s, *prev = NULL; - dma_addr_t iova; size_t iova_len = 0; unsigned long mask = dma_get_seg_boundary(dev); int i; @@ -774,6 +837,26 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, prev = s; } + return iova_len; +} + +int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int prot) +{ + struct iommu_domain *domain; + struct iommu_dma_cookie *cookie; + struct iova_domain *iovad; + dma_addr_t iova; + size_t iova_len; + + domain = iommu_get_domain_for_dev(dev); + if (!domain) + return 0; + cookie = domain->iova_cookie; + iovad = &cookie->iovad; + + iova_len = iommu_dma_prepare_map_sg(dev, iovad, sg, nents); + iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev); if (!iova) goto out_restore_sg; @@ -785,12 +868,12 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len) goto out_free_iova; - return __finalise_sg(dev, sg, nents, iova); + return iommu_dma_finalise_sg(dev, sg, nents, iova); out_free_iova: iommu_dma_free_iova(cookie, iova, iova_len); out_restore_sg: - __invalidate_sg(sg, nents); + iommu_dma_invalidate_sg(sg, nents); return 0; } diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c new file mode 100644 index 0000000000000000000000000000000000000000..c3b5e82858c3af83b17d560e0b13324f9ea7a2e6 --- /dev/null +++ b/drivers/iommu/dma-mapping-fast.c @@ -0,0 +1,1249 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* some redundant definitions... 
:( TODO: move to io-pgtable-fast.h */ +#define FAST_PAGE_SHIFT 12 +#define FAST_PAGE_SIZE (1UL << FAST_PAGE_SHIFT) +#define FAST_PAGE_MASK (~(PAGE_SIZE - 1)) + +#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K +static struct gen_pool *atomic_pool __ro_after_init; + +static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE; + +static int __init early_coherent_pool(char *p) +{ + atomic_pool_size = memparse(p, &p); + return 0; +} +early_param("coherent_pool", early_coherent_pool); + +static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot, + bool coherent) +{ + if (attrs & DMA_ATTR_STRONGLY_ORDERED) + return pgprot_noncached(prot); + else if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE)) + return pgprot_writecombine(prot); + return prot; +} + +static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) +{ + unsigned long val; + void *ptr = NULL; + + if (!atomic_pool) { + WARN(1, "coherent pool not initialised!\n"); + return NULL; + } + + val = gen_pool_alloc(atomic_pool, size); + if (val) { + phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val); + + *ret_page = phys_to_page(phys); + ptr = (void *)val; + memset(ptr, 0, size); + } + + return ptr; +} + +static phys_addr_t __atomic_get_phys(void *addr) +{ + return gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr); +} + +static bool __in_atomic_pool(void *start, size_t size) +{ + if (!atomic_pool) + return false; + return addr_in_gen_pool(atomic_pool, (unsigned long)start, size); +} + +static int __free_from_pool(void *start, size_t size) +{ + if (!__in_atomic_pool(start, size)) + return 0; + + gen_pool_free(atomic_pool, (unsigned long)start, size); + + return 1; +} + +static bool is_dma_coherent(struct device *dev, unsigned long attrs) +{ + bool is_coherent; + + if (attrs & DMA_ATTR_FORCE_COHERENT) + is_coherent = true; + else if (attrs & DMA_ATTR_FORCE_NON_COHERENT) + is_coherent = false; + else if (is_device_dma_coherent(dev)) + is_coherent = true; + else + is_coherent = false; + + return is_coherent; +} + +static struct dma_fast_smmu_mapping *dev_get_mapping(struct device *dev) +{ + struct iommu_domain *domain; + + domain = iommu_get_domain_for_dev(dev); + if (!domain) + return ERR_PTR(-EINVAL); + return domain->iova_cookie; +} + +/* + * Checks if the allocated range (ending at @end) covered the upcoming + * stale bit. We don't need to know exactly where the range starts since + * we already know where the candidate search range started. If, starting + * from the beginning of the candidate search range, we had to step over + * (or landed directly on top of) the upcoming stale bit, then we return + * true. + * + * Due to wrapping, there are two scenarios we'll need to check: (1) if the + * range [search_start, upcoming_stale] spans 0 (i.e. search_start > + * upcoming_stale), and, (2) if the range: [search_start, upcoming_stale] + * does *not* span 0 (i.e. search_start <= upcoming_stale). And for each + * of those two scenarios we need to handle three cases: (1) the bit was + * found before wrapping or + */ +static bool __bit_covered_stale(unsigned long upcoming_stale, + unsigned long search_start, + unsigned long end) +{ + if (search_start > upcoming_stale) { + if (end >= search_start) { + /* + * We started searching above upcoming_stale and we + * didn't wrap, so we couldn't have crossed + * upcoming_stale. + */ + return false; + } + /* + * We wrapped. Did we cross (or land on top of) + * upcoming_stale? 
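+ * For example (with made-up bit numbers): upcoming_stale = 10,
+ * search_start = 200, end = 15.  The search wrapped past bit 0 and
+ * stepped over bit 10, so this returns true.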
+ */ + return end >= upcoming_stale; + } + + if (search_start <= upcoming_stale) { + if (end >= search_start) { + /* + * We didn't wrap. Did we cross (or land on top + * of) upcoming_stale? + */ + return end >= upcoming_stale; + } + /* + * We wrapped. So we must have crossed upcoming_stale + * (since we started searching below it). + */ + return true; + } + + /* we should have covered all logical combinations... */ + WARN_ON(1); + return true; +} + +static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping, + unsigned long attrs, + size_t size) +{ + unsigned long bit, prev_search_start, nbits = size >> FAST_PAGE_SHIFT; + unsigned long align = (1 << get_order(size)) - 1; + + bit = bitmap_find_next_zero_area( + mapping->bitmap, mapping->num_4k_pages, mapping->next_start, + nbits, align); + if (unlikely(bit > mapping->num_4k_pages)) { + /* try wrapping */ + bit = bitmap_find_next_zero_area( + mapping->bitmap, mapping->num_4k_pages, 0, nbits, + align); + if (unlikely(bit > mapping->num_4k_pages)) + return DMA_ERROR_CODE; + } + + bitmap_set(mapping->bitmap, bit, nbits); + prev_search_start = mapping->next_start; + mapping->next_start = bit + nbits; + if (unlikely(mapping->next_start >= mapping->num_4k_pages)) + mapping->next_start = 0; + + /* + * If we just re-allocated a VA whose TLB hasn't been invalidated + * since it was last used and unmapped, we need to invalidate it + * here. We actually invalidate the entire TLB so that we don't + * have to invalidate the TLB again until we wrap back around. + */ + if (mapping->have_stale_tlbs && + __bit_covered_stale(mapping->upcoming_stale_bit, + prev_search_start, + bit + nbits - 1)) { + bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC); + + iommu_tlbiall(mapping->domain); + mapping->have_stale_tlbs = false; + av8l_fast_clear_stale_ptes(mapping->pgtbl_ops, + mapping->domain->geometry.aperture_start, + mapping->base, + mapping->base + mapping->size - 1, + skip_sync); + } + + return (bit << FAST_PAGE_SHIFT) + mapping->base; +} + +/* + * Checks whether the candidate bit will be allocated sooner than the + * current upcoming stale bit. We can say candidate will be upcoming + * sooner than the current upcoming stale bit if it lies between the + * starting bit of the next search range and the upcoming stale bit + * (allowing for wrap-around). + * + * Stated differently, we're checking the relative ordering of three + * unsigned numbers. So we need to check all 6 (i.e. 3!) permutations, + * namely: + * + * 0 |---A---B---C---| TOP (Case 1) + * 0 |---A---C---B---| TOP (Case 2) + * 0 |---B---A---C---| TOP (Case 3) + * 0 |---B---C---A---| TOP (Case 4) + * 0 |---C---A---B---| TOP (Case 5) + * 0 |---C---B---A---| TOP (Case 6) + * + * Note that since we're allowing numbers to wrap, the following three + * scenarios are all equivalent for Case 1: + * + * 0 |---A---B---C---| TOP + * 0 |---C---A---B---| TOP (C has wrapped. This is Case 5.) + * 0 |---B---C---A---| TOP (C and B have wrapped. This is Case 4.) + * + * In any of these cases, if we start searching from A, we will find B + * before we find C. + * + * We can also find two equivalent cases for Case 2: + * + * 0 |---A---C---B---| TOP + * 0 |---B---A---C---| TOP (B has wrapped. This is Case 3.) + * 0 |---C---B---A---| TOP (B and C have wrapped. This is Case 6.) + * + * In any of these cases, if we start searching from A, we will find C + * before we find B. 
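+ *
+ * In short: with A = next_start, B = candidate and C = upcoming_stale_bit,
+ * the candidate is "sooner" exactly when, walking up from A and wrapping
+ * at TOP, we reach B before C.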
+ */ +static bool __bit_is_sooner(unsigned long candidate, + struct dma_fast_smmu_mapping *mapping) +{ + unsigned long A = mapping->next_start; + unsigned long B = candidate; + unsigned long C = mapping->upcoming_stale_bit; + + if ((A < B && B < C) || /* Case 1 */ + (C < A && A < B) || /* Case 5 */ + (B < C && C < A)) /* Case 4 */ + return true; + + if ((A < C && C < B) || /* Case 2 */ + (B < A && A < C) || /* Case 3 */ + (C < B && B < A)) /* Case 6 */ + return false; + + /* + * For simplicity, we've been ignoring the possibility of any of + * our three numbers being equal. Handle those cases here (they + * shouldn't happen very often, (I think?)). + */ + + /* + * If candidate is the next bit to be searched then it's definitely + * sooner. + */ + if (A == B) + return true; + + /* + * If candidate is the next upcoming stale bit we'll return false + * to avoid doing `upcoming = candidate' in the caller (which would + * be useless since they're already equal) + */ + if (B == C) + return false; + + /* + * If next start is the upcoming stale bit then candidate can't + * possibly be sooner. The "soonest" bit is already selected. + */ + if (A == C) + return false; + + /* We should have covered all logical combinations. */ + WARN(1, "Well, that's awkward. A=%ld, B=%ld, C=%ld\n", A, B, C); + return true; +} + +#ifdef CONFIG_ARM64 +static int __init atomic_pool_init(void) +{ + pgprot_t prot = __pgprot(PROT_NORMAL_NC); + unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT; + struct page *page; + void *addr; + unsigned int pool_size_order = get_order(atomic_pool_size); + + if (dev_get_cma_area(NULL)) + page = dma_alloc_from_contiguous(NULL, nr_pages, + pool_size_order, false); + else + page = alloc_pages(GFP_DMA32, pool_size_order); + + if (page) { + int ret; + void *page_addr = page_address(page); + + memset(page_addr, 0, atomic_pool_size); + __dma_flush_area(page_addr, atomic_pool_size); + + atomic_pool = gen_pool_create(PAGE_SHIFT, -1); + if (!atomic_pool) + goto free_page; + + addr = dma_common_contiguous_remap(page, atomic_pool_size, + VM_USERMAP, prot, atomic_pool_init); + + if (!addr) + goto destroy_genpool; + + ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr, + page_to_phys(page), + atomic_pool_size, -1); + if (ret) + goto remove_mapping; + + gen_pool_set_algo(atomic_pool, + gen_pool_first_fit_order_align, + NULL); + + pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n", + atomic_pool_size / 1024); + return 0; + } + goto out; + +remove_mapping: + dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP, false); +destroy_genpool: + gen_pool_destroy(atomic_pool); + atomic_pool = NULL; +free_page: + if (!dma_release_from_contiguous(NULL, page, nr_pages)) + __free_pages(page, pool_size_order); +out: + pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n", + atomic_pool_size / 1024); + return -ENOMEM; +} +arch_initcall(atomic_pool_init); +#endif + +static void __fast_smmu_free_iova(struct dma_fast_smmu_mapping *mapping, + dma_addr_t iova, size_t size) +{ + unsigned long start_bit = (iova - mapping->base) >> FAST_PAGE_SHIFT; + unsigned long nbits = size >> FAST_PAGE_SHIFT; + + /* + * We don't invalidate TLBs on unmap. We invalidate TLBs on map + * when we're about to re-allocate a VA that was previously + * unmapped but hasn't yet been invalidated. So we need to keep + * track of which bit is the closest to being re-allocated here. 
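+ * The deferred invalidation itself is done in __fast_smmu_alloc_iova()
+ * once a later allocation covers the recorded stale bit.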
+ */ + if (__bit_is_sooner(start_bit, mapping)) + mapping->upcoming_stale_bit = start_bit; + + bitmap_clear(mapping->bitmap, start_bit, nbits); + mapping->have_stale_tlbs = true; +} + + +static void __fast_dma_page_cpu_to_dev(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + __dma_map_area(page_address(page) + off, size, dir); +} + +static void __fast_dma_page_dev_to_cpu(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + __dma_unmap_area(page_address(page) + off, size, dir); + + /* TODO: WHAT IS THIS? */ + /* + * Mark the D-cache clean for this page to avoid extra flushing. + */ + if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE) + set_bit(PG_dcache_clean, &page->flags); +} + +static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev); + dma_addr_t iova; + unsigned long flags; + phys_addr_t phys_plus_off = page_to_phys(page) + offset; + phys_addr_t phys_to_map = round_down(phys_plus_off, FAST_PAGE_SIZE); + unsigned long offset_from_phys_to_map = phys_plus_off & ~FAST_PAGE_MASK; + size_t len = ALIGN(size + offset_from_phys_to_map, FAST_PAGE_SIZE); + bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC); + bool is_coherent = is_dma_coherent(dev, attrs); + int prot = dma_info_to_prot(dir, is_coherent, attrs); + + if (!skip_sync && !is_coherent) + __fast_dma_page_cpu_to_dev(phys_to_page(phys_to_map), + offset_from_phys_to_map, size, dir); + + spin_lock_irqsave(&mapping->lock, flags); + + iova = __fast_smmu_alloc_iova(mapping, attrs, len); + + if (unlikely(iova == DMA_ERROR_CODE)) + goto fail; + + if (unlikely(av8l_fast_map_public(mapping->pgtbl_ops, iova, + phys_to_map, len, prot))) + goto fail_free_iova; + + spin_unlock_irqrestore(&mapping->lock, flags); + + trace_map(mapping->domain, iova, phys_to_map, len, prot); + return iova + offset_from_phys_to_map; + +fail_free_iova: + __fast_smmu_free_iova(mapping, iova, size); +fail: + spin_unlock_irqrestore(&mapping->lock, flags); + return DMA_ERROR_CODE; +} + +static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev); + unsigned long flags; + unsigned long offset = iova & ~FAST_PAGE_MASK; + size_t len = ALIGN(size + offset, FAST_PAGE_SIZE); + bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC); + bool is_coherent = is_dma_coherent(dev, attrs); + + if (!skip_sync && !is_coherent) { + phys_addr_t phys; + + phys = av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova); + WARN_ON(!phys); + + __fast_dma_page_dev_to_cpu(phys_to_page(phys), offset, + size, dir); + } + + spin_lock_irqsave(&mapping->lock, flags); + av8l_fast_unmap_public(mapping->pgtbl_ops, iova, len); + __fast_smmu_free_iova(mapping, iova, len); + spin_unlock_irqrestore(&mapping->lock, flags); + + trace_unmap(mapping->domain, iova - offset, len, len); +} + +static void fast_smmu_sync_single_for_cpu(struct device *dev, + dma_addr_t iova, size_t size, enum dma_data_direction dir) +{ + struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev); + unsigned long offset = iova & ~FAST_PAGE_MASK; + + if (!av8l_fast_iova_coherent_public(mapping->pgtbl_ops, iova)) { + phys_addr_t phys; + + phys = av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova); + WARN_ON(!phys); + + 
__fast_dma_page_dev_to_cpu(phys_to_page(phys), offset, + size, dir); + } +} + +static void fast_smmu_sync_single_for_device(struct device *dev, + dma_addr_t iova, size_t size, enum dma_data_direction dir) +{ + struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev); + unsigned long offset = iova & ~FAST_PAGE_MASK; + + if (!av8l_fast_iova_coherent_public(mapping->pgtbl_ops, iova)) { + phys_addr_t phys; + + phys = av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova); + WARN_ON(!phys); + + __fast_dma_page_cpu_to_dev(phys_to_page(phys), offset, + size, dir); + } +} + +static void fast_smmu_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir) +{ + struct scatterlist *sg; + dma_addr_t iova = sg_dma_address(sgl); + struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev); + int i; + + if (av8l_fast_iova_coherent_public(mapping->pgtbl_ops, iova)) + return; + + for_each_sg(sgl, sg, nelems, i) + __dma_unmap_area(sg_virt(sg), sg->length, dir); +} + +static void fast_smmu_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir) +{ + struct scatterlist *sg; + dma_addr_t iova = sg_dma_address(sgl); + struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev); + int i; + + if (av8l_fast_iova_coherent_public(mapping->pgtbl_ops, iova)) + return; + + for_each_sg(sgl, sg, nelems, i) + __dma_map_area(sg_virt(sg), sg->length, dir); +} + +static int fast_smmu_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev); + size_t iova_len; + bool is_coherent = is_dma_coherent(dev, attrs); + int prot = dma_info_to_prot(dir, is_coherent, attrs); + int ret; + dma_addr_t iova; + unsigned long flags; + size_t unused; + + iova_len = iommu_dma_prepare_map_sg(dev, mapping->iovad, sg, nents); + + spin_lock_irqsave(&mapping->lock, flags); + iova = __fast_smmu_alloc_iova(mapping, attrs, iova_len); + spin_unlock_irqrestore(&mapping->lock, flags); + + if (unlikely(iova == DMA_ERROR_CODE)) + goto fail; + + av8l_fast_map_sg_public(mapping->pgtbl_ops, iova, sg, nents, prot, + &unused); + + ret = iommu_dma_finalise_sg(dev, sg, nents, iova); + + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) + fast_smmu_sync_sg_for_device(dev, sg, nents, dir); + + return ret; +fail: + iommu_dma_invalidate_sg(sg, nents); + return 0; +} + +static void fast_smmu_unmap_sg(struct device *dev, + struct scatterlist *sg, int nelems, + enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev); + unsigned long flags; + dma_addr_t start; + size_t len; + struct scatterlist *tmp; + int i; + + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) + fast_smmu_sync_sg_for_cpu(dev, sg, nelems, dir); + + /* + * The scatterlist segments are mapped into a single + * contiguous IOVA allocation, so this is incredibly easy. 
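+ * All we need is the dma address of the first segment and the total
+ * mapped length, found by walking to the last segment with a non-zero
+ * dma length.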
+ */ + start = sg_dma_address(sg); + for_each_sg(sg_next(sg), tmp, nelems - 1, i) { + if (sg_dma_len(tmp) == 0) + break; + sg = tmp; + } + len = sg_dma_address(sg) + sg_dma_len(sg) - start; + + av8l_fast_unmap_public(mapping->pgtbl_ops, start, len); + + spin_lock_irqsave(&mapping->lock, flags); + __fast_smmu_free_iova(mapping, start, len); + spin_unlock_irqrestore(&mapping->lock, flags); +} + +static void __fast_smmu_free_pages(struct page **pages, int count) +{ + int i; + + if (!pages) + return; + for (i = 0; i < count; i++) + __free_page(pages[i]); + kvfree(pages); +} + +static void *fast_smmu_alloc_atomic(struct dma_fast_smmu_mapping *mapping, + size_t size, gfp_t gfp, unsigned long attrs, + dma_addr_t *handle, bool coherent) +{ + void *addr; + unsigned long flags; + struct page *page; + dma_addr_t dma_addr; + int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); + + if (coherent) { + page = alloc_pages(gfp, get_order(size)); + addr = page ? page_address(page) : NULL; + } else + addr = __alloc_from_pool(size, &page, gfp); + if (!addr) + return NULL; + + spin_lock_irqsave(&mapping->lock, flags); + dma_addr = __fast_smmu_alloc_iova(mapping, attrs, size); + if (dma_addr == DMA_ERROR_CODE) { + dev_err(mapping->dev, "no iova\n"); + spin_unlock_irqrestore(&mapping->lock, flags); + goto out_free_page; + } + if (unlikely(av8l_fast_map_public(mapping->pgtbl_ops, dma_addr, + page_to_phys(page), size, prot))) { + dev_err(mapping->dev, "no map public\n"); + goto out_free_iova; + } + spin_unlock_irqrestore(&mapping->lock, flags); + *handle = dma_addr; + return addr; + +out_free_iova: + __fast_smmu_free_iova(mapping, dma_addr, size); + spin_unlock_irqrestore(&mapping->lock, flags); +out_free_page: + coherent ? __free_pages(page, get_order(size)) : + __free_from_pool(addr, size); + return NULL; +} + +static struct page **__fast_smmu_alloc_pages(unsigned int count, gfp_t gfp) +{ + struct page **pages; + unsigned int i = 0, array_size = count * sizeof(*pages); + + if (array_size <= PAGE_SIZE) + pages = kzalloc(array_size, GFP_KERNEL); + else + pages = vzalloc(array_size); + if (!pages) + return NULL; + + /* IOMMU can map any pages, so himem can also be used here */ + gfp |= __GFP_NOWARN | __GFP_HIGHMEM; + + for (i = 0; i < count; ++i) { + struct page *page = alloc_page(gfp); + + if (!page) { + __fast_smmu_free_pages(pages, i); + return NULL; + } + pages[i] = page; + } + return pages; +} + +static void *__fast_smmu_alloc_contiguous(struct device *dev, size_t size, + dma_addr_t *handle, gfp_t gfp, unsigned long attrs) +{ + struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev); + bool is_coherent = is_dma_coherent(dev, attrs); + int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, is_coherent, attrs); + pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent); + struct page *page; + dma_addr_t iova; + unsigned long flags; + void *coherent_addr; + + page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, + get_order(size), gfp & __GFP_NOWARN); + if (!page) + return NULL; + + + spin_lock_irqsave(&mapping->lock, flags); + iova = __fast_smmu_alloc_iova(mapping, attrs, size); + spin_unlock_irqrestore(&mapping->lock, flags); + if (iova == DMA_ERROR_CODE) + goto release_page; + + if (av8l_fast_map_public(mapping->pgtbl_ops, iova, page_to_phys(page), + size, prot)) + goto release_iova; + + coherent_addr = dma_common_contiguous_remap(page, size, VM_USERMAP, + remap_prot, __fast_smmu_alloc_contiguous); + if (!coherent_addr) + goto release_mapping; + + if (!is_coherent) + 
__dma_flush_area(page_to_virt(page), size); + + *handle = iova; + return coherent_addr; + +release_mapping: + av8l_fast_unmap_public(mapping->pgtbl_ops, iova, size); +release_iova: + __fast_smmu_free_iova(mapping, iova, size); +release_page: + dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); + return NULL; +} + +static void *fast_smmu_alloc(struct device *dev, size_t size, + dma_addr_t *handle, gfp_t gfp, + unsigned long attrs) +{ + struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev); + struct sg_table sgt; + dma_addr_t dma_addr, iova_iter; + void *addr; + unsigned long flags; + struct sg_mapping_iter miter; + size_t count = ALIGN(size, SZ_4K) >> PAGE_SHIFT; + bool is_coherent = is_dma_coherent(dev, attrs); + int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, is_coherent, attrs); + pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent); + struct page **pages; + + /* + * sg_alloc_table_from_pages accepts unsigned int value for count + * so check count doesn't exceed UINT_MAX. + */ + + if (count > UINT_MAX) { + dev_err(dev, "count: %zx exceeds UINT_MAX\n", count); + return NULL; + } + + *handle = DMA_ERROR_CODE; + size = ALIGN(size, SZ_4K); + + if (atomic_pool && !gfpflags_allow_blocking(gfp)) + return fast_smmu_alloc_atomic(mapping, size, gfp, attrs, handle, + is_coherent); + else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) + return __fast_smmu_alloc_contiguous(dev, size, handle, gfp, + attrs); + + pages = __fast_smmu_alloc_pages(count, gfp); + if (!pages) { + dev_err(dev, "no pages\n"); + return NULL; + } + + if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, gfp)) { + dev_err(dev, "no sg table\n"); + goto out_free_pages; + } + + if (!is_coherent) { + /* + * The CPU-centric flushing implied by SG_MITER_TO_SG isn't + * sufficient here, so skip it by using the "wrong" direction.
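+ * SG_MITER_FROM_SG avoids that implicit flush; the cache maintenance
+ * is instead done explicitly with __dma_flush_area() on each chunk.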
+ */ + sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, + SG_MITER_FROM_SG); + while (sg_miter_next(&miter)) + __dma_flush_area(miter.addr, miter.length); + sg_miter_stop(&miter); + } + + spin_lock_irqsave(&mapping->lock, flags); + dma_addr = __fast_smmu_alloc_iova(mapping, attrs, size); + if (dma_addr == DMA_ERROR_CODE) { + dev_err(dev, "no iova\n"); + spin_unlock_irqrestore(&mapping->lock, flags); + goto out_free_sg; + } + iova_iter = dma_addr; + sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + while (sg_miter_next(&miter)) { + if (unlikely(av8l_fast_map_public( + mapping->pgtbl_ops, iova_iter, + page_to_phys(miter.page), + miter.length, prot))) { + dev_err(dev, "no map public\n"); + /* TODO: unwind previously successful mappings */ + goto out_free_iova; + } + iova_iter += miter.length; + } + sg_miter_stop(&miter); + spin_unlock_irqrestore(&mapping->lock, flags); + + addr = dma_common_pages_remap(pages, size, VM_USERMAP, remap_prot, + __builtin_return_address(0)); + if (!addr) { + dev_err(dev, "no common pages\n"); + goto out_unmap; + } + + *handle = dma_addr; + sg_free_table(&sgt); + return addr; + +out_unmap: + /* need to take the lock again for page tables and iova */ + spin_lock_irqsave(&mapping->lock, flags); + av8l_fast_unmap_public(mapping->pgtbl_ops, dma_addr, size); +out_free_iova: + __fast_smmu_free_iova(mapping, dma_addr, size); + spin_unlock_irqrestore(&mapping->lock, flags); +out_free_sg: + sg_free_table(&sgt); +out_free_pages: + __fast_smmu_free_pages(pages, count); + return NULL; +} + +static void fast_smmu_free(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + unsigned long attrs) +{ + struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev); + struct vm_struct *area; + unsigned long flags; + + size = ALIGN(size, FAST_PAGE_SIZE); + + spin_lock_irqsave(&mapping->lock, flags); + av8l_fast_unmap_public(mapping->pgtbl_ops, dma_handle, size); + __fast_smmu_free_iova(mapping, dma_handle, size); + spin_unlock_irqrestore(&mapping->lock, flags); + + area = find_vm_area(cpu_addr); + if (area && area->pages) { + struct page **pages = area->pages; + + dma_common_free_remap(cpu_addr, size, VM_USERMAP, false); + __fast_smmu_free_pages(pages, size >> FAST_PAGE_SHIFT); + } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { + struct page *page = vmalloc_to_page(cpu_addr); + + dma_common_free_remap(cpu_addr, size, VM_USERMAP, false); + dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); + } else if (!is_vmalloc_addr(cpu_addr)) { + __free_pages(virt_to_page(cpu_addr), get_order(size)); + } else if (__in_atomic_pool(cpu_addr, size)) { + // Keep remap + __free_from_pool(cpu_addr, size); + } +} + +/* __swiotlb_mmap_pfn is not currently exported. 
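+ * fast_smmu_mmap_pfn() below is a local copy of that logic.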
*/ +static int fast_smmu_mmap_pfn(struct vm_area_struct *vma, unsigned long pfn, + size_t size) +{ + int ret = -ENXIO; + unsigned long nr_vma_pages = vma_pages(vma); + unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; + unsigned long off = vma->vm_pgoff; + + if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { + ret = remap_pfn_range(vma, vma->vm_start, pfn + off, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + } + + return ret; +} + +static int fast_smmu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, + size_t size, unsigned long attrs) +{ + struct vm_struct *area; + bool coherent = is_dma_coherent(dev, attrs); + unsigned long pfn = 0; + + vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, + coherent); + area = find_vm_area(cpu_addr); + if (area && area->pages) + return iommu_dma_mmap(area->pages, size, vma); + else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) + pfn = vmalloc_to_pfn(cpu_addr); + else if (!is_vmalloc_addr(cpu_addr)) + pfn = page_to_pfn(virt_to_page(cpu_addr)); + else if (__in_atomic_pool(cpu_addr, size)) + pfn = __atomic_get_phys(cpu_addr) >> PAGE_SHIFT; + + + if (pfn) + return fast_smmu_mmap_pfn(vma, pfn, size); + + return -EINVAL; +} + +static int fast_smmu_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, + size_t size, unsigned long attrs) +{ + unsigned int n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; + struct vm_struct *area; + struct page *page = NULL; + int ret = -ENXIO; + + area = find_vm_area(cpu_addr); + if (area && area->pages) + return sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0, + size, GFP_KERNEL); + else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) + page = vmalloc_to_page(cpu_addr); + else if (!is_vmalloc_addr(cpu_addr)) + page = virt_to_page(cpu_addr); + else if (__in_atomic_pool(cpu_addr, size)) + page = phys_to_page(__atomic_get_phys(cpu_addr)); + + if (page) { + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (!ret) + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); + } + + return ret; +} + +static dma_addr_t fast_smmu_dma_map_resource( + struct device *dev, phys_addr_t phys_addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev); + size_t offset = phys_addr & ~FAST_PAGE_MASK; + size_t len = round_up(size + offset, FAST_PAGE_SIZE); + dma_addr_t dma_addr; + int prot; + unsigned long flags; + + spin_lock_irqsave(&mapping->lock, flags); + dma_addr = __fast_smmu_alloc_iova(mapping, attrs, len); + spin_unlock_irqrestore(&mapping->lock, flags); + + if (dma_addr == DMA_ERROR_CODE) + return dma_addr; + + prot = dma_info_to_prot(dir, false, attrs); + prot |= IOMMU_MMIO; + + if (iommu_map(mapping->domain, dma_addr, phys_addr - offset, + len, prot)) { + spin_lock_irqsave(&mapping->lock, flags); + __fast_smmu_free_iova(mapping, dma_addr, len); + spin_unlock_irqrestore(&mapping->lock, flags); + return DMA_ERROR_CODE; + } + return dma_addr + offset; +} + +static void fast_smmu_dma_unmap_resource( + struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev); + size_t offset = addr & ~FAST_PAGE_MASK; + size_t len = round_up(size + offset, FAST_PAGE_SIZE); + unsigned long flags; + + iommu_unmap(mapping->domain, addr - offset, len); + spin_lock_irqsave(&mapping->lock, flags); + __fast_smmu_free_iova(mapping, addr, len); + spin_unlock_irqrestore(&mapping->lock, 
flags); +} + +static int fast_smmu_mapping_error(struct device *dev, + dma_addr_t dma_addr) +{ + return dma_addr == DMA_ERROR_CODE; +} + +static void __fast_smmu_mapped_over_stale(struct dma_fast_smmu_mapping *fast, + void *data) +{ + av8l_fast_iopte *pmds, *ptep = data; + dma_addr_t iova; + unsigned long bitmap_idx; + struct io_pgtable *tbl; + + tbl = container_of(fast->pgtbl_ops, struct io_pgtable, ops); + pmds = tbl->cfg.av8l_fast_cfg.pmds; + + bitmap_idx = (unsigned long)(ptep - pmds); + iova = bitmap_idx << FAST_PAGE_SHIFT; + dev_err(fast->dev, "Mapped over stale tlb at %pa\n", &iova); + dev_err(fast->dev, "bitmap (failure at idx %lu):\n", bitmap_idx); + dev_err(fast->dev, "ptep: %p pmds: %p diff: %lu\n", ptep, + pmds, bitmap_idx); + print_hex_dump(KERN_ERR, "bmap: ", DUMP_PREFIX_ADDRESS, + 32, 8, fast->bitmap, fast->bitmap_size, false); +} + +static int fast_smmu_notify(struct notifier_block *self, + unsigned long action, void *data) +{ + struct dma_fast_smmu_mapping *fast = container_of( + self, struct dma_fast_smmu_mapping, notifier); + + switch (action) { + case MAPPED_OVER_STALE_TLB: + __fast_smmu_mapped_over_stale(fast, data); + return NOTIFY_OK; + default: + WARN(1, "Unhandled notifier action"); + return NOTIFY_DONE; + } +} + +static const struct dma_map_ops fast_smmu_dma_ops = { + .alloc = fast_smmu_alloc, + .free = fast_smmu_free, + .mmap = fast_smmu_mmap_attrs, + .get_sgtable = fast_smmu_get_sgtable, + .map_page = fast_smmu_map_page, + .unmap_page = fast_smmu_unmap_page, + .sync_single_for_cpu = fast_smmu_sync_single_for_cpu, + .sync_single_for_device = fast_smmu_sync_single_for_device, + .map_sg = fast_smmu_map_sg, + .unmap_sg = fast_smmu_unmap_sg, + .sync_sg_for_cpu = fast_smmu_sync_sg_for_cpu, + .sync_sg_for_device = fast_smmu_sync_sg_for_device, + .map_resource = fast_smmu_dma_map_resource, + .unmap_resource = fast_smmu_dma_unmap_resource, + .mapping_error = fast_smmu_mapping_error, +}; + +/** + * __fast_smmu_create_mapping_sized + * @base: bottom of the VA range + * @size: size of the VA range in bytes + * + * Creates a mapping structure which holds information about used/unused IO + * address ranges, which is required to perform mapping with IOMMU aware + * functions. The only VA range supported is [0, 4GB]. + * + * The client device need to be attached to the mapping with + * fast_smmu_attach_device function. 
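+ *
+ * Return: the new mapping on success, ERR_PTR(-ENOMEM) on failure.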
+ */ +static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized( + dma_addr_t base, u64 size) +{ + struct dma_fast_smmu_mapping *fast; + + fast = kzalloc(sizeof(struct dma_fast_smmu_mapping), GFP_KERNEL); + if (!fast) + goto err; + + fast->base = base; + fast->size = size; + fast->num_4k_pages = size >> FAST_PAGE_SHIFT; + fast->bitmap_size = BITS_TO_LONGS(fast->num_4k_pages) * sizeof(long); + + fast->bitmap = kzalloc(fast->bitmap_size, GFP_KERNEL | __GFP_NOWARN | + __GFP_NORETRY); + if (!fast->bitmap) + fast->bitmap = vzalloc(fast->bitmap_size); + + if (!fast->bitmap) + goto err2; + + spin_lock_init(&fast->lock); + + fast->iovad = kzalloc(sizeof(*fast->iovad), GFP_KERNEL); + if (!fast->iovad) + goto err_free_bitmap; + init_iova_domain(fast->iovad, FAST_PAGE_SIZE, + base >> FAST_PAGE_SHIFT); + + return fast; + +err_free_bitmap: + kvfree(fast->bitmap); +err2: + kfree(fast); +err: + return ERR_PTR(-ENOMEM); +} + +/* + * Based off of similar code from dma-iommu.c, but modified to use a different + * iova allocator + */ +static void fast_smmu_reserve_pci_windows(struct device *dev, + struct dma_fast_smmu_mapping *mapping) +{ + struct pci_host_bridge *bridge; + struct resource_entry *window; + phys_addr_t start, end; + struct pci_dev *pci_dev; + unsigned long flags; + + if (!dev_is_pci(dev)) + return; + + pci_dev = to_pci_dev(dev); + bridge = pci_find_host_bridge(pci_dev->bus); + + spin_lock_irqsave(&mapping->lock, flags); + resource_list_for_each_entry(window, &bridge->windows) { + if (resource_type(window->res) != IORESOURCE_MEM && + resource_type(window->res) != IORESOURCE_IO) + continue; + + start = round_down(window->res->start - window->offset, + FAST_PAGE_SIZE); + end = round_up(window->res->end - window->offset, + FAST_PAGE_SIZE); + start = max_t(unsigned long, mapping->base, start); + end = min_t(unsigned long, mapping->base + mapping->size, end); + if (start >= end) + continue; + + dev_dbg(dev, "iova allocator reserved 0x%pa-0x%pa\n", + &start, &end); + + start = (start - mapping->base) >> FAST_PAGE_SHIFT; + end = (end - mapping->base) >> FAST_PAGE_SHIFT; + bitmap_set(mapping->bitmap, start, end - start); + } + spin_unlock_irqrestore(&mapping->lock, flags); +} + +void fast_smmu_put_dma_cookie(struct iommu_domain *domain) +{ + struct dma_fast_smmu_mapping *fast = domain->iova_cookie; + + if (!fast) + return; + + if (fast->iovad) { + put_iova_domain(fast->iovad); + kfree(fast->iovad); + } + + if (fast->bitmap) + kvfree(fast->bitmap); + + kfree(fast); + domain->iova_cookie = NULL; +} +EXPORT_SYMBOL_GPL(fast_smmu_put_dma_cookie); + +/** + * fast_smmu_init_mapping + * @dev: valid struct device pointer + * @mapping: io address space mapping structure (returned from + * arm_iommu_create_mapping) + * + * Called the first time a device is attached to this mapping. + * Not for dma client use. 
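+ *
+ * Return: 0 on success, -EINVAL or -ENOMEM on failure.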
+ */ +int fast_smmu_init_mapping(struct device *dev, + struct dma_iommu_mapping *mapping) +{ + int err = 0; + struct iommu_domain *domain = mapping->domain; + struct iommu_pgtbl_info info; + u64 size = (u64)mapping->bits << PAGE_SHIFT; + struct dma_fast_smmu_mapping *fast; + + if (domain->iova_cookie) { + fast = domain->iova_cookie; + goto finish; + } + + if (mapping->base + size > (SZ_1G * 4ULL)) { + dev_err(dev, "Iova end address too large\n"); + return -EINVAL; + } + + fast = __fast_smmu_create_mapping_sized(mapping->base, size); + if (IS_ERR(fast)) + return -ENOMEM; + + fast->domain = domain; + fast->dev = dev; + domain->iova_cookie = fast; + + domain->geometry.aperture_start = mapping->base; + domain->geometry.aperture_end = mapping->base + size - 1; + + if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTBL_INFO, + &info)) { + dev_err(dev, "Couldn't get page table info\n"); + err = -EINVAL; + goto release_mapping; + } + fast->pgtbl_ops = (struct io_pgtable_ops *)info.ops; + + fast->notifier.notifier_call = fast_smmu_notify; + av8l_register_notify(&fast->notifier); + +finish: + fast_smmu_reserve_pci_windows(dev, fast); + mapping->ops = &fast_smmu_dma_ops; + return 0; + +release_mapping: + fast_smmu_put_dma_cookie(domain); + return err; +} diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 5944d3b4dca37a9c555cef2e01a5cdcbd23c90b0..ef3aadec980eecc0db478ff058f7c8ad0a493bd5 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -620,14 +620,15 @@ static irqreturn_t prq_event_thread(int irq, void *d) * any faults on kernel addresses. */ if (!svm->mm) goto bad_req; - /* If the mm is already defunct, don't handle faults. */ - if (!mmget_not_zero(svm->mm)) - goto bad_req; /* If address is not canonical, return invalid response */ if (!is_canonical_address(address)) goto bad_req; + /* If the mm is already defunct, don't handle faults. */ + if (!mmget_not_zero(svm->mm)) + goto bad_req; + down_read(&svm->mm->mmap_sem); vma = find_extend_vma(svm->mm, address); if (!vma || address < vma->vm_start) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index d04aa36be37723572d28f42d6deb5800f2d697a5..7e6b3459aeb1109cdc59d1fac7f2d86ccae892eb 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -21,10 +21,10 @@ #define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt #include -#include #include #include #include +#include #include #include #include @@ -32,7 +32,7 @@ #include -#define ARM_LPAE_MAX_ADDR_BITS 52 +#define ARM_LPAE_MAX_ADDR_BITS 48 #define ARM_LPAE_S2_MAX_CONCAT_PAGES 16 #define ARM_LPAE_MAX_LEVELS 4 @@ -69,6 +69,9 @@ #define ARM_LPAE_PGD_IDX(l,d) \ ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) +#define ARM_LPAE_LVL_MASK(l, d) \ + ((l) == ARM_LPAE_START_LVL(d) ? 
(1 << (d)->pgd_bits) - 1 : \ + (1 << (d)->bits_per_level) - 1) #define ARM_LPAE_LVL_IDX(a,l,d) \ (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) @@ -86,8 +89,7 @@ #define ARM_LPAE_PTE_TYPE_TABLE 3 #define ARM_LPAE_PTE_TYPE_PAGE 3 -#define ARM_LPAE_PTE_ADDR_MASK GENMASK_ULL(47,12) - +#define ARM_LPAE_PTE_SH_MASK (((arm_lpae_iopte)0x3) << 8) #define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63) #define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53) #define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10) @@ -106,8 +108,11 @@ #define ARM_LPAE_PTE_SW_SYNC (((arm_lpae_iopte)1) << 55) /* Stage-1 PTE */ +#define ARM_LPAE_PTE_AP_PRIV_RW (((arm_lpae_iopte)0) << 6) #define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6) -#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6) +#define ARM_LPAE_PTE_AP_PRIV_RO (((arm_lpae_iopte)2) << 6) +#define ARM_LPAE_PTE_AP_RO (((arm_lpae_iopte)3) << 6) +#define ARM_LPAE_PTE_ATTRINDX_MASK 0x7 #define ARM_LPAE_PTE_ATTRINDX_SHIFT 2 #define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11) @@ -161,19 +166,25 @@ #define ARM_LPAE_TCR_PS_42_BIT 0x3ULL #define ARM_LPAE_TCR_PS_44_BIT 0x4ULL #define ARM_LPAE_TCR_PS_48_BIT 0x5ULL -#define ARM_LPAE_TCR_PS_52_BIT 0x6ULL #define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) +#define ARM_LPAE_MAIR1_ATTR_SHIFT(n) ((n-4) << 3) #define ARM_LPAE_MAIR_ATTR_MASK 0xff #define ARM_LPAE_MAIR_ATTR_DEVICE 0x04 #define ARM_LPAE_MAIR_ATTR_NC 0x44 #define ARM_LPAE_MAIR_ATTR_WBRWA 0xff +#define ARM_LPAE_MAIR_ATTR_UPSTREAM 0xf4 +#define ARM_LPAE_MAIR_ATTR_LLC_NWA 0xe4 #define ARM_LPAE_MAIR_ATTR_IDX_NC 0 #define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1 #define ARM_LPAE_MAIR_ATTR_IDX_DEV 2 +#define ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM 3 +#define ARM_LPAE_MAIR_ATTR_IDX_LLC_NWA 0x4ULL /* IOPTE accessors */ -#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d)) +#define iopte_deref(pte, d) \ + (__va(iopte_val(pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \ + & ~(ARM_LPAE_GRANULE(d) - 1ULL))) #define iopte_type(pte,l) \ (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK) @@ -185,38 +196,95 @@ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK)) +#define iopte_to_pfn(pte, d) \ + (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift) + +#define pfn_to_iopte(pfn, d) \ + (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) + struct arm_lpae_io_pgtable { struct io_pgtable iop; int levels; + unsigned int pgd_bits; size_t pgd_size; unsigned long pg_shift; unsigned long bits_per_level; void *pgd; + void *pgd_ttbr1; }; typedef u64 arm_lpae_iopte; -static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr, - struct arm_lpae_io_pgtable *data) +/* + * We'll use some ignored bits in table entries to keep track of the number + * of page mappings beneath the table. The maximum number of entries + * beneath any table mapping in armv8 is 8192 (which is possible at the + * 2nd- and 3rd-level when using a 64K granule size). The bits at our + * disposal are: + * + * 4k granule: [54..52], [11..2] + * 64k granule: [54..52], [15..2] + * + * [54..52], [11..2] is enough bits for tracking table mappings at any + * level for any granule, so we'll use those. 
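+ *
+ * That gives 13 bits of count per table entry; iopte_tblcnt_set() below
+ * packs the low 10 bits of the count into PTE bits [11:2] and the top
+ * 3 bits into PTE bits [54:52], and iopte_tblcnt() reassembles them.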
+ */ +#define BOTTOM_IGNORED_MASK 0x3ff +#define BOTTOM_IGNORED_SHIFT 2 +#define BOTTOM_IGNORED_NUM_BITS 10 +#define TOP_IGNORED_MASK 0x7ULL +#define TOP_IGNORED_SHIFT 52 +#define IOPTE_RESERVED_MASK ((BOTTOM_IGNORED_MASK << BOTTOM_IGNORED_SHIFT) | \ + (TOP_IGNORED_MASK << TOP_IGNORED_SHIFT)) + +static arm_lpae_iopte iopte_val(arm_lpae_iopte table_pte) +{ + return table_pte & ~IOPTE_RESERVED_MASK; +} + +static arm_lpae_iopte _iopte_bottom_ignored_val(arm_lpae_iopte table_pte) +{ + return (table_pte & (BOTTOM_IGNORED_MASK << BOTTOM_IGNORED_SHIFT)) + >> BOTTOM_IGNORED_SHIFT; +} + +static arm_lpae_iopte _iopte_top_ignored_val(arm_lpae_iopte table_pte) { - arm_lpae_iopte pte = paddr; + return (table_pte & (TOP_IGNORED_MASK << TOP_IGNORED_SHIFT)) + >> TOP_IGNORED_SHIFT; +} - /* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */ - return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK; +static int iopte_tblcnt(arm_lpae_iopte table_pte) +{ + return (_iopte_bottom_ignored_val(table_pte) | + (_iopte_top_ignored_val(table_pte) << BOTTOM_IGNORED_NUM_BITS)); } -static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte, - struct arm_lpae_io_pgtable *data) +static void iopte_tblcnt_set(arm_lpae_iopte *table_pte, int val) { - u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK; + arm_lpae_iopte pte = iopte_val(*table_pte); - if (data->pg_shift < 16) - return paddr; + pte |= ((val & BOTTOM_IGNORED_MASK) << BOTTOM_IGNORED_SHIFT) | + (((val & (TOP_IGNORED_MASK << BOTTOM_IGNORED_NUM_BITS)) + >> BOTTOM_IGNORED_NUM_BITS) << TOP_IGNORED_SHIFT); + *table_pte = pte; +} - /* Rotate the packed high-order bits back to the top */ - return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4); +static void iopte_tblcnt_sub(arm_lpae_iopte *table_ptep, int cnt) +{ + arm_lpae_iopte current_cnt = iopte_tblcnt(*table_ptep); + + current_cnt -= cnt; + iopte_tblcnt_set(table_ptep, current_cnt); +} + +static void iopte_tblcnt_add(arm_lpae_iopte *table_ptep, int cnt) +{ + arm_lpae_iopte current_cnt = iopte_tblcnt(*table_ptep); + + current_cnt += cnt; + iopte_tblcnt_set(table_ptep, current_cnt); } static bool selftest_running = false; @@ -226,22 +294,27 @@ static dma_addr_t __arm_lpae_dma_addr(void *pages) return (dma_addr_t)virt_to_phys(pages); } +static inline void pgtable_dma_sync_single_for_device( + struct io_pgtable_cfg *cfg, + dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) + dma_sync_single_for_device(cfg->iommu_dev, addr, size, + dir); +} + static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, - struct io_pgtable_cfg *cfg) + struct io_pgtable_cfg *cfg, void *cookie) { struct device *dev = cfg->iommu_dev; - int order = get_order(size); - struct page *p; dma_addr_t dma; - void *pages; + void *pages = io_pgtable_alloc_pages_exact(cfg, cookie, size, + gfp | __GFP_ZERO); - VM_BUG_ON((gfp & __GFP_HIGHMEM)); - p = alloc_pages_node(dev ? 
dev_to_node(dev) : NUMA_NO_NODE, - gfp | __GFP_ZERO, order); - if (!p) + if (!pages) return NULL; - pages = page_address(p); if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) @@ -261,23 +334,23 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n"); dma_unmap_single(dev, dma, size, DMA_TO_DEVICE); out_free: - __free_pages(p, order); + io_pgtable_free_pages_exact(cfg, cookie, pages, size); return NULL; } static void __arm_lpae_free_pages(void *pages, size_t size, - struct io_pgtable_cfg *cfg) + struct io_pgtable_cfg *cfg, void *cookie) { if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), size, DMA_TO_DEVICE); - free_pages((unsigned long)pages, get_order(size)); + io_pgtable_free_pages_exact(cfg, cookie, pages, size); } static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg) { - dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep), + pgtable_dma_sync_single_for_device(cfg, __arm_lpae_dma_addr(ptep), sizeof(*ptep), DMA_TO_DEVICE); } @@ -294,9 +367,11 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, unsigned long iova, size_t size, int lvl, arm_lpae_iopte *ptep); + static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, phys_addr_t paddr, arm_lpae_iopte prot, - int lvl, arm_lpae_iopte *ptep) + int lvl, arm_lpae_iopte *ptep, + bool flush) { arm_lpae_iopte pte = prot; @@ -308,50 +383,48 @@ static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, else pte |= ARM_LPAE_PTE_TYPE_BLOCK; - pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; - pte |= paddr_to_iopte(paddr, data); + pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_OS; + pte |= pfn_to_iopte(paddr >> data->pg_shift, data); - __arm_lpae_set_pte(ptep, pte, &data->iop.cfg); + if (flush) + __arm_lpae_set_pte(ptep, pte, &data->iop.cfg); + else + *ptep = pte; } static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, unsigned long iova, phys_addr_t paddr, arm_lpae_iopte prot, int lvl, - arm_lpae_iopte *ptep) + arm_lpae_iopte *ptep, arm_lpae_iopte *prev_ptep, + bool flush) { arm_lpae_iopte pte = *ptep; - if (iopte_leaf(pte, lvl)) { - /* We require an unmap first */ - WARN_ON(!selftest_running); + /* We require an unmap first */ + if (pte & ARM_LPAE_PTE_VALID) { + WARN_RATELIMIT(1, "map without unmap\n"); return -EEXIST; - } else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) { - /* - * We need to unmap and free the old table before - * overwriting it with a block entry. - */ - arm_lpae_iopte *tblp; - size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); - - tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); - if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz)) - return -EINVAL; } - __arm_lpae_init_pte(data, paddr, prot, lvl, ptep); + __arm_lpae_init_pte(data, paddr, prot, lvl, ptep, flush); + + if (prev_ptep) + iopte_tblcnt_add(prev_ptep, 1); return 0; } static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, arm_lpae_iopte *ptep, arm_lpae_iopte curr, - struct io_pgtable_cfg *cfg) + struct io_pgtable_cfg *cfg, + int ref_count) { arm_lpae_iopte old, new; new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE; if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) new |= ARM_LPAE_PTE_NSTABLE; + iopte_tblcnt_set(&new, ref_count); /* * Ensure the table itself is visible before its PTE can be. 
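/*
 * Illustrative, standalone sketch (not part of the patch): how the 13-bit
 * "entries beneath this table" count described in the comment above can be
 * packed into the ignored PTE bits [11:2] and [54:52].  The helper names
 * below are made up for the example; the patch's equivalents are
 * iopte_tblcnt_set() and iopte_tblcnt().
 */
#include <assert.h>
#include <stdint.h>

#define CNT_LOW_MASK	0x3ffULL	/* 10 bits -> PTE bits [11:2]  */
#define CNT_LOW_SHIFT	2
#define CNT_LOW_BITS	10
#define CNT_HIGH_MASK	0x7ULL		/*  3 bits -> PTE bits [54:52] */
#define CNT_HIGH_SHIFT	52

static uint64_t pte_set_count(uint64_t pte, unsigned int cnt)
{
	/* Clear any previous count, then split cnt across the two fields. */
	pte &= ~((CNT_LOW_MASK << CNT_LOW_SHIFT) |
		 (CNT_HIGH_MASK << CNT_HIGH_SHIFT));
	pte |= ((uint64_t)cnt & CNT_LOW_MASK) << CNT_LOW_SHIFT;
	pte |= (((uint64_t)cnt >> CNT_LOW_BITS) & CNT_HIGH_MASK) << CNT_HIGH_SHIFT;
	return pte;
}

static unsigned int pte_get_count(uint64_t pte)
{
	unsigned int lo = (pte >> CNT_LOW_SHIFT) & CNT_LOW_MASK;
	unsigned int hi = (pte >> CNT_HIGH_SHIFT) & CNT_HIGH_MASK;

	return lo | (hi << CNT_LOW_BITS);
}

int main(void)
{
	uint64_t pte = 0x3;			/* table descriptor, type bits only */

	pte = pte_set_count(pte, 4097);		/* crosses the low/high split */
	assert(pte_get_count(pte) == 4097);
	pte = pte_set_count(pte, 0);
	assert(pte_get_count(pte) == 0 && pte == 0x3);
	return 0;
}

/*
 * In the patch itself, arm_lpae_install_table() seeds the count via its new
 * ref_count argument, arm_lpae_init_pte() bumps the parent entry's count for
 * each page mapped beneath it, and __arm_lpae_unmap() frees a whole leaf
 * table once the count drops back to zero.
 */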
@@ -374,21 +447,69 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, return old; } +struct map_state { + unsigned long iova_end; + unsigned int pgsize; + arm_lpae_iopte *pgtable; + arm_lpae_iopte *prev_pgtable; + arm_lpae_iopte *pte_start; + unsigned int num_pte; +}; +/* map state optimization works at level 3 (the 2nd-to-last level) */ +#define MAP_STATE_LVL 3 + static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, phys_addr_t paddr, size_t size, arm_lpae_iopte prot, - int lvl, arm_lpae_iopte *ptep) + int lvl, arm_lpae_iopte *ptep, + arm_lpae_iopte *prev_ptep, struct map_state *ms) { arm_lpae_iopte *cptep, pte; size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); size_t tblsz = ARM_LPAE_GRANULE(data); struct io_pgtable_cfg *cfg = &data->iop.cfg; + void *cookie = data->iop.cookie; + arm_lpae_iopte *pgtable = ptep; /* Find our entry at the current level */ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); /* If we can install a leaf entry at this level, then do so */ - if (size == block_size && (size & cfg->pgsize_bitmap)) - return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); + if (size == block_size && (size & cfg->pgsize_bitmap)) { + if (!ms) + return arm_lpae_init_pte(data, iova, paddr, prot, lvl, + ptep, prev_ptep, true); + + if (lvl == MAP_STATE_LVL) { + if (ms->pgtable) + pgtable_dma_sync_single_for_device(cfg, + __arm_lpae_dma_addr(ms->pte_start), + ms->num_pte * sizeof(*ptep), + DMA_TO_DEVICE); + + ms->iova_end = round_down(iova, SZ_2M) + SZ_2M; + ms->pgtable = pgtable; + ms->prev_pgtable = prev_ptep; + ms->pgsize = size; + ms->pte_start = ptep; + ms->num_pte = 1; + } else { + /* + * We have some map state from previous page + * mappings, but we're about to set up a block + * mapping. Flush out the previous page mappings. + */ + if (ms->pgtable) + pgtable_dma_sync_single_for_device(cfg, + __arm_lpae_dma_addr(ms->pte_start), + ms->num_pte * sizeof(*ptep), + DMA_TO_DEVICE); + memset(ms, 0, sizeof(*ms)); + ms = NULL; + } + + return arm_lpae_init_pte(data, iova, paddr, prot, lvl, + ptep, prev_ptep, ms == NULL); + } /* We can't allocate tables at the final level */ if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1)) @@ -397,13 +518,14 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, /* Grab a pointer to the next level */ pte = READ_ONCE(*ptep); if (!pte) { - cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg); + cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg, cookie); if (!cptep) return -ENOMEM; - pte = arm_lpae_install_table(cptep, ptep, 0, cfg); + pte = arm_lpae_install_table(cptep, ptep, 0, cfg, 0); if (pte) - __arm_lpae_free_pages(cptep, tblsz, cfg); + __arm_lpae_free_pages(cptep, tblsz, cfg, cookie); + } else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) && !(pte & ARM_LPAE_PTE_SW_SYNC)) { __arm_lpae_sync_pte(ptep, cfg); @@ -418,7 +540,8 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, } /* Rinse, repeat */ - return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); + return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, + ptep, ms); } static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, @@ -430,8 +553,12 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, data->iop.fmt == ARM_32_LPAE_S1) { pte = ARM_LPAE_PTE_nG; - if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) - pte |= ARM_LPAE_PTE_AP_RDONLY; + if (prot & IOMMU_WRITE) + pte |= (prot & IOMMU_PRIV) ? 
ARM_LPAE_PTE_AP_PRIV_RW + : ARM_LPAE_PTE_AP_UNPRIV; + else + pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RO + : ARM_LPAE_PTE_AP_RO; if (!(prot & IOMMU_PRIV)) pte |= ARM_LPAE_PTE_AP_UNPRIV; @@ -442,6 +569,12 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, else if (prot & IOMMU_CACHE) pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT); + else if (prot & IOMMU_USE_UPSTREAM_HINT) + pte |= (ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM + << ARM_LPAE_PTE_ATTRINDX_SHIFT); + else if (prot & IOMMU_USE_LLC_NWA) + pte |= (ARM_LPAE_MAIR_ATTR_IDX_LLC_NWA + << ARM_LPAE_PTE_ATTRINDX_SHIFT); } else { pte = ARM_LPAE_PTE_HAP_FAULT; if (prot & IOMMU_READ) @@ -479,7 +612,8 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, return -ERANGE; prot = arm_lpae_prot_to_pte(data, iommu_prot); - ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); + ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, NULL, + NULL); /* * Synchronise all PTE updates for the new mapping before there's * a chance for anything to kick off a table walk for the new iova. @@ -489,11 +623,97 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, return ret; } +static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova, + struct scatterlist *sg, unsigned int nents, + int iommu_prot, size_t *size) +{ + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + arm_lpae_iopte *ptep = data->pgd; + int lvl = ARM_LPAE_START_LVL(data); + arm_lpae_iopte prot; + struct scatterlist *s; + size_t mapped = 0; + int i; + int ret = -EINVAL; + unsigned int min_pagesz; + struct io_pgtable_cfg *cfg = &data->iop.cfg; + struct map_state ms; + + /* If no access, then nothing to do */ + if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) + goto out_err; + + prot = arm_lpae_prot_to_pte(data, iommu_prot); + + min_pagesz = 1 << __ffs(cfg->pgsize_bitmap); + + memset(&ms, 0, sizeof(ms)); + + for_each_sg(sg, s, nents, i) { + phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset; + size_t size = s->length; + + /* + * We are mapping on IOMMU page boundaries, so offset within + * the page must be 0. However, the IOMMU may support pages + * smaller than PAGE_SIZE, so s->offset may still represent + * an offset of that boundary within the CPU page. + */ + if (!IS_ALIGNED(s->offset, min_pagesz)) + goto out_err; + + while (size) { + size_t pgsize = iommu_pgsize( + cfg->pgsize_bitmap, iova | phys, size); + + if (ms.pgtable && (iova < ms.iova_end)) { + arm_lpae_iopte *ptep = ms.pgtable + + ARM_LPAE_LVL_IDX(iova, MAP_STATE_LVL, + data); + arm_lpae_init_pte( + data, iova, phys, prot, MAP_STATE_LVL, + ptep, ms.prev_pgtable, false); + ms.num_pte++; + } else { + ret = __arm_lpae_map(data, iova, phys, pgsize, + prot, lvl, ptep, NULL, &ms); + if (ret) + goto out_err; + } + + iova += pgsize; + mapped += pgsize; + phys += pgsize; + size -= pgsize; + } + } + + if (ms.pgtable) + pgtable_dma_sync_single_for_device(cfg, + __arm_lpae_dma_addr(ms.pte_start), + ms.num_pte * sizeof(*ms.pte_start), + DMA_TO_DEVICE); + + /* + * Synchronise all PTE updates for the new mapping before there's + * a chance for anything to kick off a table walk for the new iova. 
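+ *
+ * map_sg batches leaf PTE writes per 2MB table and cleans each batch
+ * with pgtable_dma_sync_single_for_device() above, so this barrier is
+ * the final ordering point before the caller can hand the IOVA range
+ * to the device.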
+ */ + wmb(); + + return mapped; + +out_err: + /* Return the size of the partial mapping so that they can be undone */ + *size = mapped; + return ret; +} + static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, arm_lpae_iopte *ptep) { arm_lpae_iopte *start, *end; unsigned long table_size; + void *cookie = data->iop.cookie; if (lvl == ARM_LPAE_START_LVL(data)) table_size = data->pgd_size; @@ -517,7 +737,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); } - __arm_lpae_free_pages(start, table_size, &data->iop.cfg); + __arm_lpae_free_pages(start, table_size, &data->iop.cfg, cookie); } static void arm_lpae_free_pgtable(struct io_pgtable *iop) @@ -525,6 +745,8 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop) struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd); + __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), + data->pgd_ttbr1); kfree(data); } @@ -539,18 +761,22 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, size_t tablesz = ARM_LPAE_GRANULE(data); size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data); int i, unmap_idx = -1; + void *cookie = data->iop.cookie; + int child_cnt = 0; + + size = iommu_pgsize(data->iop.cfg.pgsize_bitmap, iova, size); if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) return 0; - tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg); + tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, cookie); if (!tablep) return 0; /* Bytes unmapped */ if (size == split_sz) unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data); - blk_paddr = iopte_to_paddr(blk_pte, data); + blk_paddr = iopte_to_pfn(blk_pte, data) << data->pg_shift; pte = iopte_prot(blk_pte); for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) { @@ -558,12 +784,14 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, if (i == unmap_idx) continue; - __arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]); + __arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i], + true); + child_cnt++; } - pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg); + pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg, child_cnt); if (pte != blk_pte) { - __arm_lpae_free_pages(tablep, tablesz, cfg); + __arm_lpae_free_pages(tablep, tablesz, cfg, cookie); /* * We may race against someone unmapping another part of this * block, but anything else is invalid. 
We can't misinterpret @@ -603,16 +831,44 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, if (!iopte_leaf(pte, lvl)) { /* Also flush any partial walks */ - io_pgtable_tlb_add_flush(iop, iova, size, - ARM_LPAE_GRANULE(data), false); - io_pgtable_tlb_sync(iop); ptep = iopte_deref(pte, data); __arm_lpae_free_pgtable(data, lvl + 1, ptep); - } else { - io_pgtable_tlb_add_flush(iop, iova, size, size, true); } return size; + } else if ((lvl == ARM_LPAE_MAX_LEVELS - 2) && !iopte_leaf(pte, lvl)) { + arm_lpae_iopte *table = iopte_deref(pte, data); + arm_lpae_iopte *table_base = table; + int tl_offset = ARM_LPAE_LVL_IDX(iova, lvl + 1, data); + int entry_size = ARM_LPAE_GRANULE(data); + int max_entries = ARM_LPAE_BLOCK_SIZE(lvl, data) >> + data->pg_shift; + int entries = min_t(int, size / entry_size, + max_entries - tl_offset); + int table_len = entries * sizeof(*table); + + /* + * This isn't a block mapping so it must be a table mapping + * and since it's the 2nd-to-last level the next level has + * to be all page mappings. Zero them all out in one fell + * swoop. + */ + + table += tl_offset; + + memset(table, 0, table_len); + pgtable_dma_sync_single_for_device(&iop->cfg, + __arm_lpae_dma_addr(table), + table_len, DMA_TO_DEVICE); + + iopte_tblcnt_sub(ptep, entries); + if (!iopte_tblcnt(*ptep)) { + /* no valid mappings left under this table. free it. */ + __arm_lpae_set_pte(ptep, 0, &iop->cfg); + __arm_lpae_free_pgtable(data, lvl + 1, table_base); + } + + return entries * entry_size; } else if (iopte_leaf(pte, lvl)) { /* * Insert a table at the next level to map the old region, @@ -628,8 +884,9 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, } static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, - size_t size) + size_t size) { + size_t unmapped = 0; struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); arm_lpae_iopte *ptep = data->pgd; int lvl = ARM_LPAE_START_LVL(data); @@ -637,49 +894,133 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias))) return 0; - return __arm_lpae_unmap(data, iova, size, lvl, ptep); + while (unmapped < size) { + size_t ret, size_to_unmap, remaining; + + remaining = (size - unmapped); + size_to_unmap = iommu_pgsize(data->iop.cfg.pgsize_bitmap, iova, + remaining); + size_to_unmap = size_to_unmap >= SZ_2M ? + size_to_unmap : + min_t(unsigned long, remaining, + (ALIGN(iova + 1, SZ_2M) - iova)); + ret = __arm_lpae_unmap(data, iova, size_to_unmap, lvl, ptep); + if (ret == 0) + break; + unmapped += ret; + iova += ret; + } + + if (unmapped) + io_pgtable_tlb_flush_all(&data->iop); + + return unmapped; } -static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, - unsigned long iova) +static int arm_lpae_iova_to_pte(struct arm_lpae_io_pgtable *data, + unsigned long iova, int *plvl_ret, + arm_lpae_iopte *ptep_ret) { - struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); arm_lpae_iopte pte, *ptep = data->pgd; - int lvl = ARM_LPAE_START_LVL(data); + *plvl_ret = ARM_LPAE_START_LVL(data); + *ptep_ret = 0; do { /* Valid IOPTE pointer? */ if (!ptep) - return 0; + return -EINVAL; /* Grab the IOPTE we're interested in */ - ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); - pte = READ_ONCE(*ptep); + pte = *(ptep + ARM_LPAE_LVL_IDX(iova, *plvl_ret, data)); /* Valid entry? */ if (!pte) - return 0; + return -EINVAL; /* Leaf entry? 
*/ - if (iopte_leaf(pte,lvl)) + if (iopte_leaf(pte, *plvl_ret)) goto found_translation; /* Take it to the next level */ ptep = iopte_deref(pte, data); - } while (++lvl < ARM_LPAE_MAX_LEVELS); + } while (++(*plvl_ret) < ARM_LPAE_MAX_LEVELS); /* Ran out of page tables to walk */ - return 0; + return -EINVAL; found_translation: - iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1); - return iopte_to_paddr(pte, data) | iova; + *ptep_ret = pte; + return 0; +} + +static uint64_t arm_lpae_iova_get_pte(struct io_pgtable_ops *ops, + unsigned long iova) +{ + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + arm_lpae_iopte pte; + int lvl; + + if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte)) + return pte; + + return 0; +} + +static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, + unsigned long iova) +{ + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + arm_lpae_iopte pte; + int lvl; + phys_addr_t phys = 0; + + if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte)) { + iova &= ((1 << ARM_LPAE_LVL_SHIFT(lvl, data)) - 1); + phys = ((phys_addr_t)iopte_to_pfn(pte, data) + << data->pg_shift) | iova; + } + + return phys; +} + +static bool __arm_lpae_is_iova_coherent(struct arm_lpae_io_pgtable *data, + arm_lpae_iopte *ptep) +{ + if (data->iop.fmt == ARM_64_LPAE_S1 || + data->iop.fmt == ARM_32_LPAE_S1) { + int attr_idx = (*ptep & (ARM_LPAE_PTE_ATTRINDX_MASK << + ARM_LPAE_PTE_ATTRINDX_SHIFT)) >> + ARM_LPAE_PTE_ATTRINDX_SHIFT; + if ((attr_idx == ARM_LPAE_MAIR_ATTR_IDX_CACHE) && + (((*ptep & ARM_LPAE_PTE_SH_MASK) == ARM_LPAE_PTE_SH_IS) + || + (*ptep & ARM_LPAE_PTE_SH_MASK) == ARM_LPAE_PTE_SH_OS)) + return true; + } else { + if (*ptep & ARM_LPAE_PTE_MEMATTR_OIWB) + return true; + } + + return false; +} + +static bool arm_lpae_is_iova_coherent(struct io_pgtable_ops *ops, + unsigned long iova) +{ + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + arm_lpae_iopte pte; + int lvl; + bool ret = false; + + if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte)) + ret = __arm_lpae_is_iova_coherent(data, &pte); + + return ret; } static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) { - unsigned long granule, page_sizes; - unsigned int max_addr_bits = 48; + unsigned long granule; /* * We need to restrict the supported page sizes to match the @@ -699,24 +1040,17 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) switch (granule) { case SZ_4K: - page_sizes = (SZ_4K | SZ_2M | SZ_1G); + cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); break; case SZ_16K: - page_sizes = (SZ_16K | SZ_32M); + cfg->pgsize_bitmap &= (SZ_16K | SZ_32M); break; case SZ_64K: - max_addr_bits = 52; - page_sizes = (SZ_64K | SZ_512M); - if (cfg->oas > 48) - page_sizes |= 1ULL << 42; /* 4TB */ + cfg->pgsize_bitmap &= (SZ_64K | SZ_512M); break; default: - page_sizes = 0; + cfg->pgsize_bitmap = 0; } - - cfg->pgsize_bitmap &= page_sizes; - cfg->ias = min(cfg->ias, max_addr_bits); - cfg->oas = min(cfg->oas, max_addr_bits); } static struct arm_lpae_io_pgtable * @@ -753,12 +1087,16 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) /* Calculate the actual size of our pgd (without concatenation) */ pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1)); + data->pgd_bits = pgd_bits; data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte))); data->iop.ops = (struct io_pgtable_ops) { .map = arm_lpae_map, + .map_sg = arm_lpae_map_sg, .unmap = arm_lpae_unmap, .iova_to_phys = arm_lpae_iova_to_phys, + .is_iova_coherent = arm_lpae_is_iova_coherent, + .iova_to_pte = 
arm_lpae_iova_get_pte, }; return data; @@ -770,7 +1108,11 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) u64 reg; struct arm_lpae_io_pgtable *data; - if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA)) + if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS + | IO_PGTABLE_QUIRK_NO_DMA + | IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT + | IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE + | IO_PGTABLE_QUIRK_QCOM_USE_LLC_NWA)) return NULL; data = arm_lpae_alloc_pgtable(cfg); @@ -778,9 +1120,32 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) return NULL; /* TCR */ - reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | - (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | - (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); + if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) + reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) | + (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | + (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); + else if ((cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) && + (cfg->quirks & IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE)) + reg = (ARM_LPAE_TCR_SH_NS << ARM_LPAE_TCR_SH0_SHIFT) | + (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) | + (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); + else if (cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) + reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) | + (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) | + (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); + else if ((cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_LLC_NWA) && + (cfg->quirks & IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE)) + reg = (ARM_LPAE_TCR_SH_NS << ARM_LPAE_TCR_SH0_SHIFT) | + (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) | + (ARM_LPAE_TCR_RGN_WB << ARM_LPAE_TCR_ORGN0_SHIFT); + else if (cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_LLC_NWA) + reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) | + (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) | + (ARM_LPAE_TCR_RGN_WB << ARM_LPAE_TCR_ORGN0_SHIFT); + else + reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) | + (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) | + (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT); switch (ARM_LPAE_GRANULE(data)) { case SZ_4K: @@ -813,9 +1178,6 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) case 48: reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT); break; - case 52: - reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT); - break; default: goto out_free_data; } @@ -832,24 +1194,39 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) (ARM_LPAE_MAIR_ATTR_WBRWA << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | (ARM_LPAE_MAIR_ATTR_DEVICE - << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)); + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) | + (ARM_LPAE_MAIR_ATTR_UPSTREAM + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM)); cfg->arm_lpae_s1_cfg.mair[0] = reg; - cfg->arm_lpae_s1_cfg.mair[1] = 0; + + reg = ARM_LPAE_MAIR_ATTR_LLC_NWA + << ARM_LPAE_MAIR1_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_LLC_NWA); + + cfg->arm_lpae_s1_cfg.mair[1] = reg; /* Looking good; allocate a pgd */ - data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg); + data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, + cfg, cookie); if (!data->pgd) goto out_free_data; + data->pgd_ttbr1 = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, + cfg, cookie); + if (!data->pgd_ttbr1) + goto out_free_pgd; + /* Ensure the empty pgd 
is visible before any actual TTBR write */ wmb(); /* TTBRs */ cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd); - cfg->arm_lpae_s1_cfg.ttbr[1] = 0; + cfg->arm_lpae_s1_cfg.ttbr[1] = virt_to_phys(data->pgd_ttbr1); return &data->iop; +out_free_pgd: + __arm_lpae_free_pages(data->pgd, data->pgd_size, cfg, cookie); + out_free_data: kfree(data); return NULL; @@ -862,7 +1239,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) struct arm_lpae_io_pgtable *data; /* The NS quirk doesn't apply at stage 2 */ - if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA) + if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)) return NULL; data = arm_lpae_alloc_pgtable(cfg); @@ -923,9 +1300,6 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) case 48: reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT); break; - case 52: - reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT); - break; default: goto out_free_data; } @@ -935,7 +1309,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) cfg->arm_lpae_s2_cfg.vtcr = reg; /* Allocate pgd pages */ - data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg); + data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, + cfg, cookie); if (!data->pgd) goto out_free_data; @@ -1018,7 +1393,6 @@ static void dummy_tlb_add_flush(unsigned long iova, size_t size, size_t granule, bool leaf, void *cookie) { WARN_ON(cookie != cfg_cookie); - WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); } static void dummy_tlb_sync(void *cookie) @@ -1051,6 +1425,43 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) -EFAULT; \ }) +/* + * Returns true if there's any mapping in the given iova range in ops. + */ +static bool arm_lpae_range_has_mapping(struct io_pgtable_ops *ops, + unsigned long iova_start, size_t size) +{ + unsigned long iova = iova_start; + + while (iova < (iova_start + size)) { + if (ops->iova_to_phys(ops, iova + 42)) + return true; + iova += SZ_4K; + } + return false; +} + +/* + * Returns true if the iova range is successfully mapped to the contiguous + * phys range in ops. + */ +static bool arm_lpae_range_has_specific_mapping(struct io_pgtable_ops *ops, + const unsigned long iova_start, + const phys_addr_t phys_start, + const size_t size) +{ + unsigned long iova = iova_start; + phys_addr_t phys = phys_start; + + while (iova < (iova_start + size)) { + if (ops->iova_to_phys(ops, iova + 42) != (phys + 42)) + return false; + iova += SZ_4K; + phys += SZ_4K; + } + return true; +} + static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) { static const enum io_pgtable_fmt fmts[] = { @@ -1058,14 +1469,16 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) ARM_64_LPAE_S2, }; - int i, j; + int i, j, k; unsigned long iova; size_t size; struct io_pgtable_ops *ops; - selftest_running = true; for (i = 0; i < ARRAY_SIZE(fmts); ++i) { + unsigned long test_sg_sizes[] = { SZ_4K, SZ_64K, SZ_2M, + SZ_1M * 12, SZ_1M * 20 }; + cfg_cookie = cfg; ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg); if (!ops) { @@ -1074,16 +1487,11 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) } /* - * Initial sanity checks. - * Empty page tables shouldn't provide any translations. + * Initial sanity checks. Empty page tables shouldn't + * provide any translations. 
TODO: check entire supported + * range for these ops rather than first 2G */ - if (ops->iova_to_phys(ops, 42)) - return __FAIL(ops, i); - - if (ops->iova_to_phys(ops, SZ_1G + 42)) - return __FAIL(ops, i); - - if (ops->iova_to_phys(ops, SZ_2G + 42)) + if (arm_lpae_range_has_mapping(ops, 0, SZ_2G)) return __FAIL(ops, i); /* @@ -1104,7 +1512,8 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) IOMMU_READ | IOMMU_NOEXEC)) return __FAIL(ops, i); - if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) + if (!arm_lpae_range_has_specific_mapping(ops, iova, + iova, size)) return __FAIL(ops, i); iova += SZ_1G; @@ -1115,11 +1524,15 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) if (ops->unmap(ops, SZ_1G + size, size) != size) return __FAIL(ops, i); + if (arm_lpae_range_has_mapping(ops, SZ_1G + size, size)) + return __FAIL(ops, i); + /* Remap of partial unmap */ if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) return __FAIL(ops, i); - if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) + if (!arm_lpae_range_has_specific_mapping(ops, SZ_1G + size, + size, size)) return __FAIL(ops, i); /* Full unmap */ @@ -1140,9 +1553,105 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) return __FAIL(ops, i); + if (ops->unmap(ops, iova, size) != size) + return __FAIL(ops, i); + iova += SZ_1G; } + if (arm_lpae_range_has_mapping(ops, 0, SZ_2G)) + return __FAIL(ops, i); + + if ((cfg->pgsize_bitmap & SZ_2M) && + (cfg->pgsize_bitmap & SZ_4K)) { + /* mixed block + page mappings */ + iova = 0; + if (ops->map(ops, iova, iova, SZ_2M, IOMMU_READ)) + return __FAIL(ops, i); + + if (ops->map(ops, iova + SZ_2M, iova + SZ_2M, SZ_4K, + IOMMU_READ)) + return __FAIL(ops, i); + + if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) + return __FAIL(ops, i); + + if (ops->iova_to_phys(ops, iova + SZ_2M + 42) != + (iova + SZ_2M + 42)) + return __FAIL(ops, i); + + /* unmap both mappings at once */ + if (ops->unmap(ops, iova, SZ_2M + SZ_4K) != + (SZ_2M + SZ_4K)) + return __FAIL(ops, i); + + if (arm_lpae_range_has_mapping(ops, 0, SZ_2G)) + return __FAIL(ops, i); + } + + /* map_sg */ + for (j = 0; j < ARRAY_SIZE(test_sg_sizes); ++j) { + size_t mapped; + size_t unused; + struct page *page; + phys_addr_t page_phys; + struct sg_table table; + struct scatterlist *sg; + unsigned long total_size = test_sg_sizes[j]; + int chunk_size = 1UL << find_first_bit( + &cfg->pgsize_bitmap, BITS_PER_LONG); + int nents = total_size / chunk_size; + int ret; + + if (total_size < chunk_size) + continue; + + page = alloc_pages(GFP_KERNEL, get_order(chunk_size)); + if (!page) + return -ENOMEM; + page_phys = page_to_phys(page); + + iova = 0; + ret = sg_alloc_table(&table, nents, GFP_KERNEL); + if (ret) + return ret; + for_each_sg(table.sgl, sg, table.nents, k) + sg_set_page(sg, page, chunk_size, 0); + + mapped = ops->map_sg(ops, iova, table.sgl, table.nents, + IOMMU_READ | IOMMU_WRITE, &unused); + + if (mapped != total_size) + return __FAIL(ops, i); + + if (!arm_lpae_range_has_mapping(ops, iova, total_size)) + return __FAIL(ops, i); + + if (arm_lpae_range_has_mapping(ops, iova + total_size, + SZ_2G - (iova + total_size))) + return __FAIL(ops, i); + + for_each_sg(table.sgl, sg, table.nents, k) { + dma_addr_t newphys = + ops->iova_to_phys(ops, iova + 42); + if (newphys != (page_phys + 42)) + return __FAIL(ops, i); + iova += chunk_size; + } + + if (ops->unmap(ops, 0, total_size) != total_size) + return __FAIL(ops, i); + + if 
(arm_lpae_range_has_mapping(ops, 0, SZ_2G)) + return __FAIL(ops, i); + + sg_free_table(&table); + __free_pages(page, get_order(chunk_size)); + } + + if (arm_lpae_range_has_mapping(ops, 0, SZ_2G)) + return __FAIL(ops, i); + free_io_pgtable_ops(ops); } @@ -1154,8 +1663,6 @@ static int __init arm_lpae_do_selftests(void) { static const unsigned long pgsize[] = { SZ_4K | SZ_2M | SZ_1G, - SZ_16K | SZ_32M, - SZ_64K | SZ_512M, }; static const unsigned int ias[] = { diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c new file mode 100644 index 0000000000000000000000000000000000000000..61b86f00b3f4f5f09c2da51f132d01de199106a6 --- /dev/null +++ b/drivers/iommu/io-pgtable-fast.c @@ -0,0 +1,816 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "io-pgtable-fast: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define AV8L_FAST_MAX_ADDR_BITS 48 + +/* Struct accessors */ +#define iof_pgtable_to_data(x) \ + container_of((x), struct av8l_fast_io_pgtable, iop) + +#define iof_pgtable_ops_to_pgtable(x) \ + container_of((x), struct io_pgtable, ops) + +#define iof_pgtable_ops_to_data(x) \ + iof_pgtable_to_data(iof_pgtable_ops_to_pgtable(x)) + +struct av8l_fast_io_pgtable { + struct io_pgtable iop; + av8l_fast_iopte *pgd; + av8l_fast_iopte *puds[4]; + av8l_fast_iopte *pmds; + struct page **pages; /* page table memory */ + int nr_pages; + dma_addr_t base; + dma_addr_t start; + dma_addr_t end; +}; + +/* Page table bits */ +#define AV8L_FAST_PTE_TYPE_SHIFT 0 +#define AV8L_FAST_PTE_TYPE_MASK 0x3 + +#define AV8L_FAST_PTE_TYPE_BLOCK 1 +#define AV8L_FAST_PTE_TYPE_TABLE 3 +#define AV8L_FAST_PTE_TYPE_PAGE 3 + +#define AV8L_FAST_PTE_NSTABLE (((av8l_fast_iopte)1) << 63) +#define AV8L_FAST_PTE_XN (((av8l_fast_iopte)3) << 53) +#define AV8L_FAST_PTE_AF (((av8l_fast_iopte)1) << 10) +#define AV8L_FAST_PTE_SH_NS (((av8l_fast_iopte)0) << 8) +#define AV8L_FAST_PTE_SH_OS (((av8l_fast_iopte)2) << 8) +#define AV8L_FAST_PTE_SH_IS (((av8l_fast_iopte)3) << 8) +#define AV8L_FAST_PTE_SH_MASK (((av8l_fast_iopte)3) << 8) +#define AV8L_FAST_PTE_NS (((av8l_fast_iopte)1) << 5) +#define AV8L_FAST_PTE_VALID (((av8l_fast_iopte)1) << 0) + +#define AV8L_FAST_PTE_ATTR_LO_MASK (((av8l_fast_iopte)0x3ff) << 2) +/* Ignore the contiguous bit for block splitting */ +#define AV8L_FAST_PTE_ATTR_HI_MASK (((av8l_fast_iopte)6) << 52) +#define AV8L_FAST_PTE_ATTR_MASK (AV8L_FAST_PTE_ATTR_LO_MASK | \ + AV8L_FAST_PTE_ATTR_HI_MASK) +#define AV8L_FAST_PTE_ADDR_MASK ((av8l_fast_iopte)0xfffffffff000) + + +/* Stage-1 PTE */ +#define AV8L_FAST_PTE_AP_PRIV_RW (((av8l_fast_iopte)0) << 6) +#define AV8L_FAST_PTE_AP_RW (((av8l_fast_iopte)1) << 6) +#define AV8L_FAST_PTE_AP_PRIV_RO (((av8l_fast_iopte)2) << 6) +#define AV8L_FAST_PTE_AP_RO (((av8l_fast_iopte)3) << 6) +#define AV8L_FAST_PTE_ATTRINDX_SHIFT 2 +#define AV8L_FAST_PTE_ATTRINDX_MASK 0x7 +#define AV8L_FAST_PTE_nG (((av8l_fast_iopte)1) << 11) + +/* Stage-2 PTE */ +#define AV8L_FAST_PTE_HAP_FAULT (((av8l_fast_iopte)0) << 6) +#define AV8L_FAST_PTE_HAP_READ (((av8l_fast_iopte)1) << 6) +#define AV8L_FAST_PTE_HAP_WRITE (((av8l_fast_iopte)2) << 6) +#define AV8L_FAST_PTE_MEMATTR_OIWB (((av8l_fast_iopte)0xf) << 2) +#define AV8L_FAST_PTE_MEMATTR_NC (((av8l_fast_iopte)0x5) << 2) +#define AV8L_FAST_PTE_MEMATTR_DEV (((av8l_fast_iopte)0x1) << 2) + +/* Register bits */ +#define ARM_32_LPAE_TCR_EAE (1 << 31) +#define ARM_64_LPAE_S2_TCR_RES1 
(1 << 31) + +#define AV8L_FAST_TCR_TG0_4K (0 << 14) +#define AV8L_FAST_TCR_TG0_64K (1 << 14) +#define AV8L_FAST_TCR_TG0_16K (2 << 14) + +#define AV8L_FAST_TCR_SH0_SHIFT 12 +#define AV8L_FAST_TCR_SH0_MASK 0x3 +#define AV8L_FAST_TCR_SH_NS 0 +#define AV8L_FAST_TCR_SH_OS 2 +#define AV8L_FAST_TCR_SH_IS 3 + +#define AV8L_FAST_TCR_ORGN0_SHIFT 10 +#define AV8L_FAST_TCR_IRGN0_SHIFT 8 +#define AV8L_FAST_TCR_RGN_MASK 0x3 +#define AV8L_FAST_TCR_RGN_NC 0 +#define AV8L_FAST_TCR_RGN_WBWA 1 +#define AV8L_FAST_TCR_RGN_WT 2 +#define AV8L_FAST_TCR_RGN_WB 3 + +#define AV8L_FAST_TCR_SL0_SHIFT 6 +#define AV8L_FAST_TCR_SL0_MASK 0x3 + +#define AV8L_FAST_TCR_T0SZ_SHIFT 0 +#define AV8L_FAST_TCR_SZ_MASK 0xf + +#define AV8L_FAST_TCR_PS_SHIFT 16 +#define AV8L_FAST_TCR_PS_MASK 0x7 + +#define AV8L_FAST_TCR_IPS_SHIFT 32 +#define AV8L_FAST_TCR_IPS_MASK 0x7 + +#define AV8L_FAST_TCR_PS_32_BIT 0x0ULL +#define AV8L_FAST_TCR_PS_36_BIT 0x1ULL +#define AV8L_FAST_TCR_PS_40_BIT 0x2ULL +#define AV8L_FAST_TCR_PS_42_BIT 0x3ULL +#define AV8L_FAST_TCR_PS_44_BIT 0x4ULL +#define AV8L_FAST_TCR_PS_48_BIT 0x5ULL + +#define AV8L_FAST_TCR_EPD1_SHIFT 23 +#define AV8L_FAST_TCR_EPD1_FAULT 1 + +#define AV8L_FAST_MAIR_ATTR_SHIFT(n) ((n) << 3) +#define AV8L_FAST_MAIR_ATTR_MASK 0xff +#define AV8L_FAST_MAIR_ATTR_DEVICE 0x04 +#define AV8L_FAST_MAIR_ATTR_NC 0x44 +#define AV8L_FAST_MAIR_ATTR_WBRWA 0xff +#define AV8L_FAST_MAIR_ATTR_UPSTREAM 0xf4 +#define AV8L_FAST_MAIR_ATTR_IDX_NC 0 +#define AV8L_FAST_MAIR_ATTR_IDX_CACHE 1 +#define AV8L_FAST_MAIR_ATTR_IDX_DEV 2 +#define AV8L_FAST_MAIR_ATTR_IDX_UPSTREAM 3 + +#define AV8L_FAST_PAGE_SHIFT 12 + +#define PTE_MAIR_IDX(pte) \ + ((pte >> AV8L_FAST_PTE_ATTRINDX_SHIFT) & \ + AV8L_FAST_PTE_ATTRINDX_MASK) + +#define PTE_SH_IDX(pte) (pte & AV8L_FAST_PTE_SH_MASK) + +#define iopte_pmd_offset(pmds, base, iova) (pmds + ((iova - base) >> 12)) + +#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB + +#include +#include + +static ATOMIC_NOTIFIER_HEAD(av8l_notifier_list); + +void av8l_register_notify(struct notifier_block *nb) +{ + atomic_notifier_chain_register(&av8l_notifier_list, nb); +} +EXPORT_SYMBOL(av8l_register_notify); + +static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep) +{ + if (unlikely(*ptep)) { + atomic_notifier_call_chain( + &av8l_notifier_list, MAPPED_OVER_STALE_TLB, + (void *) ptep); + pr_err("Tried to map over a non-vacant pte: 0x%llx @ %p\n", + *ptep, ptep); + pr_err("Nearby memory:\n"); + print_hex_dump(KERN_ERR, "pgtbl: ", DUMP_PREFIX_ADDRESS, + 32, 8, ptep - 16, 32 * sizeof(*ptep), false); + } +} + +void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, u64 base, + u64 start, u64 end, bool skip_sync) +{ + int i; + struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); + av8l_fast_iopte *pmdp = iopte_pmd_offset(pmds, base, start); + + for (i = start >> AV8L_FAST_PAGE_SHIFT; + i <= (end >> AV8L_FAST_PAGE_SHIFT); ++i) { + if (!(*pmdp & AV8L_FAST_PTE_VALID)) { + *pmdp = 0; + if (!skip_sync) + dmac_clean_range(pmdp, pmdp + 1); + } + pmdp++; + } +} +#else +static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep) +{ +} +#endif + +static void av8l_clean_range(struct io_pgtable_ops *ops, + av8l_fast_iopte *start, av8l_fast_iopte *end) +{ + struct io_pgtable *iop = iof_pgtable_ops_to_pgtable(ops); + + if (!(iop->cfg.quirks & IO_PGTABLE_QUIRK_NO_DMA)) + dmac_clean_range(start, end); +} + +static av8l_fast_iopte +av8l_fast_prot_to_pte(struct av8l_fast_io_pgtable *data, int prot) +{ + av8l_fast_iopte pte = AV8L_FAST_PTE_XN + | AV8L_FAST_PTE_TYPE_PAGE + | AV8L_FAST_PTE_AF + | 
AV8L_FAST_PTE_nG + | AV8L_FAST_PTE_SH_OS; + + if (prot & IOMMU_MMIO) + pte |= (AV8L_FAST_MAIR_ATTR_IDX_DEV + << AV8L_FAST_PTE_ATTRINDX_SHIFT); + else if (prot & IOMMU_CACHE) + pte |= (AV8L_FAST_MAIR_ATTR_IDX_CACHE + << AV8L_FAST_PTE_ATTRINDX_SHIFT); + else if (prot & IOMMU_USE_UPSTREAM_HINT) + pte |= (AV8L_FAST_MAIR_ATTR_IDX_UPSTREAM + << AV8L_FAST_PTE_ATTRINDX_SHIFT); + + if (!(prot & IOMMU_WRITE)) + pte |= AV8L_FAST_PTE_AP_RO; + else + pte |= AV8L_FAST_PTE_AP_RW; + + return pte; +} + +static int av8l_fast_map(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); + av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, data->base, iova); + unsigned long i, nptes = size >> AV8L_FAST_PAGE_SHIFT; + av8l_fast_iopte pte; + + pte = av8l_fast_prot_to_pte(data, prot); + paddr &= AV8L_FAST_PTE_ADDR_MASK; + for (i = 0; i < nptes; i++, paddr += SZ_4K) { + __av8l_check_for_stale_tlb(ptep + i); + *(ptep + i) = pte | paddr; + } + av8l_clean_range(ops, ptep, ptep + nptes); + + return 0; +} + +int av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + return av8l_fast_map(ops, iova, paddr, size, prot); +} + +static size_t +__av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova, + size_t size, bool allow_stale_tlb) +{ + struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); + unsigned long nptes; + av8l_fast_iopte *ptep; + int val = allow_stale_tlb + ? AV8L_FAST_PTE_UNMAPPED_NEED_TLBI + : 0; + + ptep = iopte_pmd_offset(data->pmds, data->base, iova); + nptes = size >> AV8L_FAST_PAGE_SHIFT; + + memset(ptep, val, sizeof(*ptep) * nptes); + av8l_clean_range(ops, ptep, ptep + nptes); + if (!allow_stale_tlb) + io_pgtable_tlb_flush_all(&data->iop); + + return size; +} + +/* caller must take care of tlb cache maintenance */ +void av8l_fast_unmap_public(struct io_pgtable_ops *ops, unsigned long iova, + size_t size) +{ + __av8l_fast_unmap(ops, iova, size, true); +} + +static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova, + size_t size) +{ + return __av8l_fast_unmap(ops, iova, size, false); +} + +static int av8l_fast_map_sg(struct io_pgtable_ops *ops, + unsigned long iova, struct scatterlist *sgl, + unsigned int nents, int prot, size_t *size) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) { + av8l_fast_map(ops, iova, sg_phys(sg), sg->length, prot); + iova += sg->length; + } + + return nents; +} + +int av8l_fast_map_sg_public(struct io_pgtable_ops *ops, + unsigned long iova, struct scatterlist *sgl, + unsigned int nents, int prot, size_t *size) +{ + return av8l_fast_map_sg(ops, iova, sgl, nents, prot, size); +} + +#if defined(CONFIG_ARM64) +#define FAST_PGDNDX(va) (((va) & 0x7fc0000000) >> 27) +#elif defined(CONFIG_ARM) +#define FAST_PGDNDX(va) (((va) & 0xc0000000) >> 27) +#endif + +static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops, + unsigned long iova) +{ + struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); + av8l_fast_iopte pte, *pgdp, *pudp, *pmdp; + unsigned long pgd; + phys_addr_t phys; + const unsigned long pts = AV8L_FAST_PTE_TYPE_SHIFT; + const unsigned long ptm = AV8L_FAST_PTE_TYPE_MASK; + const unsigned long ptt = AV8L_FAST_PTE_TYPE_TABLE; + const unsigned long ptp = AV8L_FAST_PTE_TYPE_PAGE; + const av8l_fast_iopte am = AV8L_FAST_PTE_ADDR_MASK; + + /* TODO: clean up some of these magic numbers... 
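+ *
+ * They are all "table index * sizeof(pte)" folded into one mask/shift:
+ *   FAST_PGDNDX(iova):            pgd byte offset, i.e. iova bits
+ *                                 [38:30] << 3 on arm64 ([31:30] on arm)
+ *   (iova & 0x3fe00000) >> 18  =  iova bits [29:21] << 3  (pud offset)
+ *   (iova & 0x1ff000)   >> 9   =  iova bits [20:12] << 3  (pmd offset)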
*/ + + pgd = (unsigned long)data->pgd | FAST_PGDNDX(iova); + pgdp = (av8l_fast_iopte *)pgd; + + pte = *pgdp; + if (((pte >> pts) & ptm) != ptt) + return 0; + pudp = phys_to_virt((pte & am) | ((iova & 0x3fe00000) >> 18)); + + pte = *pudp; + if (((pte >> pts) & ptm) != ptt) + return 0; + pmdp = phys_to_virt((pte & am) | ((iova & 0x1ff000) >> 9)); + + pte = *pmdp; + if (((pte >> pts) & ptm) != ptp) + return 0; + phys = pte & am; + + return phys | (iova & 0xfff); +} + +phys_addr_t av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops, + unsigned long iova) +{ + return av8l_fast_iova_to_phys(ops, iova); +} + +static bool av8l_fast_iova_coherent(struct io_pgtable_ops *ops, + unsigned long iova) +{ + struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); + av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, data->base, iova); + + return ((PTE_MAIR_IDX(*ptep) == AV8L_FAST_MAIR_ATTR_IDX_CACHE) && + ((PTE_SH_IDX(*ptep) == AV8L_FAST_PTE_SH_OS) || + (PTE_SH_IDX(*ptep) == AV8L_FAST_PTE_SH_IS))); +} + +bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops, + unsigned long iova) +{ + return av8l_fast_iova_coherent(ops, iova); +} + +static struct av8l_fast_io_pgtable * +av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg) +{ + struct av8l_fast_io_pgtable *data; + + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return NULL; + + data->iop.ops = (struct io_pgtable_ops) { + .map = av8l_fast_map, + .map_sg = av8l_fast_map_sg, + .unmap = av8l_fast_unmap, + .iova_to_phys = av8l_fast_iova_to_phys, + .is_iova_coherent = av8l_fast_iova_coherent, + }; + + return data; +} + +/* + * We need max 1 page for the pgd, 4 pages for puds (1GB VA per pud page) and + * 2048 pages for pmds (each pud page contains 512 table entries, each + * pointing to a pmd). + */ +#define NUM_PGD_PAGES 1 +#define NUM_PUD_PAGES 4 +#define NUM_PMD_PAGES 2048 +#define NUM_PGTBL_PAGES (NUM_PGD_PAGES + NUM_PUD_PAGES + NUM_PMD_PAGES) + +/* undefine arch specific definitions which depends on page table format */ +#undef pud_index +#undef pud_mask +#undef pud_next +#undef pmd_index +#undef pmd_mask +#undef pmd_next + +#define pud_index(addr) (((addr) >> 30) & 0x3) +#define pud_mask(addr) ((addr) & ~((1UL << 30) - 1)) +#define pud_next(addr, end) \ +({ unsigned long __boundary = pud_mask(addr + (1UL << 30));\ + (__boundary - 1 < (end) - 1) ? __boundary : (end); \ +}) + +#define pmd_index(addr) (((addr) >> 21) & 0x1ff) +#define pmd_mask(addr) ((addr) & ~((1UL << 21) - 1)) +#define pmd_next(addr, end) \ +({ unsigned long __boundary = pmd_mask(addr + (1UL << 21));\ + (__boundary - 1 < (end) - 1) ? __boundary : (end); \ +}) + +static int +av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data, + struct io_pgtable_cfg *cfg, void *cookie) +{ + int i, j, pg = 0; + struct page **pages, *page; + dma_addr_t base = cfg->iova_base; + dma_addr_t end = cfg->iova_end; + dma_addr_t pud, pmd; + int pmd_pg_index; + + pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, __GFP_NOWARN | + __GFP_NORETRY); + + if (!pages) + pages = vmalloc(sizeof(*pages) * NUM_PGTBL_PAGES); + + if (!pages) + return -ENOMEM; + + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + goto err_free_pages_arr; + pages[pg++] = page; + data->pgd = page_address(page); + + /* + * We need max 2048 entries at level 2 to map 4GB of VA space. A page + * can hold 512 entries, so we need max 4 pages. 
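+ *
+ * (4GB of VA / 2MB covered per level-2 entry = 2048 entries; at 512
+ * eight-byte entries per 4K page that is 4 pages, matching
+ * NUM_PUD_PAGES above.)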
+ */ + for (i = pud_index(base), pud = base; pud < end; + ++i, pud = pud_next(pud, end)) { + av8l_fast_iopte pte, *ptep; + + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + goto err_free_pages; + pages[pg++] = page; + data->puds[i] = page_address(page); + pte = page_to_phys(page) | AV8L_FAST_PTE_TYPE_TABLE; + ptep = ((av8l_fast_iopte *)data->pgd) + i; + *ptep = pte; + } + dmac_clean_range(data->pgd, data->pgd + 4); + + /* + * We have max 4 puds, each of which can point to 512 pmds, so we'll + * have max 2048 pmds, each of which can hold 512 ptes, for a grand + * total of 2048*512=1048576 PTEs. + */ + pmd_pg_index = pg; + for (i = pud_index(base), pud = base; pud < end; + ++i, pud = pud_next(pud, end)) { + for (j = pmd_index(pud), pmd = pud; pmd < pud_next(pud, end); + ++j, pmd = pmd_next(pmd, end)) { + av8l_fast_iopte pte, *pudp; + void *addr; + + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + goto err_free_pages; + pages[pg++] = page; + + addr = page_address(page); + dmac_clean_range(addr, addr + SZ_4K); + + pte = page_to_phys(page) | AV8L_FAST_PTE_TYPE_TABLE; + pudp = data->puds[i] + j; + *pudp = pte; + } + dmac_clean_range(data->puds[i], data->puds[i] + 512); + } + + /* + * We map the pmds into a virtually contiguous space so that we + * don't have to traverse the first two levels of the page tables + * to find the appropriate pud. Instead, it will be a simple + * offset from the virtual base of the pmds. + */ + data->pmds = vmap(&pages[pmd_pg_index], pg - pmd_pg_index, + VM_IOREMAP, PAGE_KERNEL); + if (!data->pmds) + goto err_free_pages; + + data->pages = pages; + data->nr_pages = pg; + data->base = base; + data->end = end; + return 0; + +err_free_pages: + for (i = 0; i < pg; ++i) + __free_page(pages[i]); +err_free_pages_arr: + kvfree(pages); + return -ENOMEM; +} + +static struct io_pgtable * +av8l_fast_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) +{ + u64 reg; + struct av8l_fast_io_pgtable *data = + av8l_fast_alloc_pgtable_data(cfg); + + if (!data) + return NULL; + + /* restrict according to the fast map requirements */ + cfg->ias = 32; + cfg->pgsize_bitmap = SZ_4K; + + /* TCR */ + if (cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) + reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) | + (AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_IRGN0_SHIFT) | + (AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_ORGN0_SHIFT); + else if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) + reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) | + (AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_IRGN0_SHIFT) | + (AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_ORGN0_SHIFT); + else + reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) | + (AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_IRGN0_SHIFT) | + (AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_ORGN0_SHIFT); + + reg |= AV8L_FAST_TCR_TG0_4K; + + switch (cfg->oas) { + case 32: + reg |= (AV8L_FAST_TCR_PS_32_BIT << AV8L_FAST_TCR_IPS_SHIFT); + break; + case 36: + reg |= (AV8L_FAST_TCR_PS_36_BIT << AV8L_FAST_TCR_IPS_SHIFT); + break; + case 40: + reg |= (AV8L_FAST_TCR_PS_40_BIT << AV8L_FAST_TCR_IPS_SHIFT); + break; + case 42: + reg |= (AV8L_FAST_TCR_PS_42_BIT << AV8L_FAST_TCR_IPS_SHIFT); + break; + case 44: + reg |= (AV8L_FAST_TCR_PS_44_BIT << AV8L_FAST_TCR_IPS_SHIFT); + break; + case 48: + reg |= (AV8L_FAST_TCR_PS_48_BIT << AV8L_FAST_TCR_IPS_SHIFT); + break; + default: + goto out_free_data; + } + + reg |= (64ULL - cfg->ias) << AV8L_FAST_TCR_T0SZ_SHIFT; + reg |= AV8L_FAST_TCR_EPD1_FAULT << AV8L_FAST_TCR_EPD1_SHIFT; +#if defined(CONFIG_ARM) + reg |= 
ARM_32_LPAE_TCR_EAE; +#endif + cfg->av8l_fast_cfg.tcr = reg; + + /* MAIRs */ + reg = (AV8L_FAST_MAIR_ATTR_NC + << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_NC)) | + (AV8L_FAST_MAIR_ATTR_WBRWA + << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_CACHE)) | + (AV8L_FAST_MAIR_ATTR_DEVICE + << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_DEV)) | + (AV8L_FAST_MAIR_ATTR_UPSTREAM + << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_UPSTREAM)); + + cfg->av8l_fast_cfg.mair[0] = reg; + cfg->av8l_fast_cfg.mair[1] = 0; + + /* Allocate all page table memory! */ + if (av8l_fast_prepopulate_pgtables(data, cfg, cookie)) + goto out_free_data; + + cfg->av8l_fast_cfg.pmds = data->pmds; + + /* TTBRs */ + cfg->av8l_fast_cfg.ttbr[0] = virt_to_phys(data->pgd); + cfg->av8l_fast_cfg.ttbr[1] = 0; + return &data->iop; + +out_free_data: + kfree(data); + return NULL; +} + +static void av8l_fast_free_pgtable(struct io_pgtable *iop) +{ + int i; + struct av8l_fast_io_pgtable *data = iof_pgtable_to_data(iop); + + vunmap(data->pmds); + for (i = 0; i < data->nr_pages; ++i) + __free_page(data->pages[i]); + kvfree(data->pages); + kfree(data); +} + +struct io_pgtable_init_fns io_pgtable_av8l_fast_init_fns = { + .alloc = av8l_fast_alloc_pgtable, + .free = av8l_fast_free_pgtable, +}; + + +#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST + +#include + +static struct io_pgtable_cfg *cfg_cookie; + +static void dummy_tlb_flush_all(void *cookie) +{ + WARN_ON(cookie != cfg_cookie); +} + +static void dummy_tlb_add_flush(unsigned long iova, size_t size, size_t granule, + bool leaf, void *cookie) +{ + WARN_ON(cookie != cfg_cookie); + WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); +} + +static void dummy_tlb_sync(void *cookie) +{ + WARN_ON(cookie != cfg_cookie); +} + +static struct iommu_gather_ops dummy_tlb_ops __initdata = { + .tlb_flush_all = dummy_tlb_flush_all, + .tlb_add_flush = dummy_tlb_add_flush, + .tlb_sync = dummy_tlb_sync, +}; + +/* + * Returns true if the iova range is successfully mapped to the contiguous + * phys range in ops. 
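+ *
+ * The walk below probes one 4K page at a time, at an offset of 42 bytes
+ * into each page, so sub-page offsets are checked as well.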
+ */ +static bool av8l_fast_range_has_specific_mapping(struct io_pgtable_ops *ops, + const unsigned long iova_start, + const phys_addr_t phys_start, + const size_t size) +{ + u64 iova = iova_start; + phys_addr_t phys = phys_start; + + while (iova < (iova_start + size)) { + /* + 42 just to make sure offsetting is working */ + if (ops->iova_to_phys(ops, iova + 42) != (phys + 42)) + return false; + iova += SZ_4K; + phys += SZ_4K; + } + return true; +} + +static int __init av8l_fast_positive_testing(void) +{ + int failed = 0; + u64 iova; + struct io_pgtable_ops *ops; + struct io_pgtable_cfg cfg; + struct av8l_fast_io_pgtable *data; + av8l_fast_iopte *pmds; + u64 max = SZ_1G * 4ULL - 1; + u64 base = 0; + + cfg = (struct io_pgtable_cfg) { + .quirks = 0, + .tlb = &dummy_tlb_ops, + .ias = 32, + .oas = 32, + .pgsize_bitmap = SZ_4K, + .iova_base = base, + .iova_end = max, + }; + + cfg_cookie = &cfg; + ops = alloc_io_pgtable_ops(ARM_V8L_FAST, &cfg, &cfg); + + if (WARN_ON(!ops)) + return 1; + + data = iof_pgtable_ops_to_data(ops); + pmds = data->pmds; + + /* map the entire 4GB VA space with 4K map calls */ + for (iova = base; iova < max; iova += SZ_4K) { + if (WARN_ON(ops->map(ops, iova, iova, SZ_4K, IOMMU_READ))) { + failed++; + continue; + } + } + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, + base, max - base))) + failed++; + + /* unmap it all */ + for (iova = base; iova < max; iova += SZ_4K) { + if (WARN_ON(ops->unmap(ops, iova, SZ_4K) != SZ_4K)) + failed++; + } + + /* sweep up TLB proving PTEs */ + av8l_fast_clear_stale_ptes(pmds, base, base, max, false); + + /* map the entire 4GB VA space with 8K map calls */ + for (iova = base; iova < max; iova += SZ_8K) { + if (WARN_ON(ops->map(ops, iova, iova, SZ_8K, IOMMU_READ))) { + failed++; + continue; + } + } + + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, + base, max - base))) + failed++; + + /* unmap it all with 8K unmap calls */ + for (iova = base; iova < max; iova += SZ_8K) { + if (WARN_ON(ops->unmap(ops, iova, SZ_8K) != SZ_8K)) + failed++; + } + + /* sweep up TLB proving PTEs */ + av8l_fast_clear_stale_ptes(pmds, base, base, max, false); + + /* map the entire 4GB VA space with 16K map calls */ + for (iova = base; iova < max; iova += SZ_16K) { + if (WARN_ON(ops->map(ops, iova, iova, SZ_16K, IOMMU_READ))) { + failed++; + continue; + } + } + + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, + base, max - base))) + failed++; + + /* unmap it all */ + for (iova = base; iova < max; iova += SZ_16K) { + if (WARN_ON(ops->unmap(ops, iova, SZ_16K) != SZ_16K)) + failed++; + } + + /* sweep up TLB proving PTEs */ + av8l_fast_clear_stale_ptes(pmds, base, base, max, false); + + /* map the entire 4GB VA space with 64K map calls */ + for (iova = base; iova < max; iova += SZ_64K) { + if (WARN_ON(ops->map(ops, iova, iova, SZ_64K, IOMMU_READ))) { + failed++; + continue; + } + } + + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, + base, max - base))) + failed++; + + /* unmap it all at once */ + if (WARN_ON(ops->unmap(ops, base, max - base) != (max - base))) + failed++; + + free_io_pgtable_ops(ops); + return failed; +} + +static int __init av8l_fast_do_selftests(void) +{ + int failed = 0; + + failed += av8l_fast_positive_testing(); + + pr_err("selftest: completed with %d failures\n", failed); + + return 0; +} +subsys_initcall(av8l_fast_do_selftests); +#endif diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c index 93f2880be6c67ccede1e19f45025379fdd4859cf..b720e39067a231146d6f33909bd34e10f8bad4f3 
100644 --- a/drivers/iommu/io-pgtable.c +++ b/drivers/iommu/io-pgtable.c @@ -18,10 +18,16 @@ * Author: Will Deacon */ +#define pr_fmt(fmt) "io-pgtable: " fmt + #include #include #include #include +#include +#include +#include +#include static const struct io_pgtable_init_fns * io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = { @@ -34,8 +40,13 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = { #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S [ARM_V7S] = &io_pgtable_arm_v7s_init_fns, #endif +#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST + [ARM_V8L_FAST] = &io_pgtable_av8l_fast_init_fns, +#endif }; +static struct dentry *io_pgtable_top; + struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, struct io_pgtable_cfg *cfg, void *cookie) @@ -78,3 +89,51 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops) io_pgtable_init_table[iop->fmt]->free(iop); } EXPORT_SYMBOL_GPL(free_io_pgtable_ops); + +static atomic_t pages_allocated; + +void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie, + size_t size, gfp_t gfp_mask) +{ + void *ret; + + if (cfg->tlb->alloc_pages_exact) + ret = cfg->tlb->alloc_pages_exact(cookie, size, gfp_mask); + else + ret = alloc_pages_exact(size, gfp_mask); + + if (likely(ret)) + atomic_add(1 << get_order(size), &pages_allocated); + + return ret; +} + +void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie, + void *virt, size_t size) +{ + if (cfg->tlb->free_pages_exact) + cfg->tlb->free_pages_exact(cookie, virt, size); + else + free_pages_exact(virt, size); + + atomic_sub(1 << get_order(size), &pages_allocated); +} + +static int __init io_pgtable_init(void) +{ + static const char io_pgtable_str[] __initconst = "io-pgtable"; + static const char pages_str[] __initconst = "pages"; + + io_pgtable_top = debugfs_create_dir(io_pgtable_str, iommu_debugfs_top); + debugfs_create_atomic_t(pages_str, 0600, io_pgtable_top, + &pages_allocated); + return 0; +} + +static void __exit io_pgtable_exit(void) +{ + debugfs_remove_recursive(io_pgtable_top); +} + +module_init(io_pgtable_init); +module_exit(io_pgtable_exit); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 4a5cf9c4f4ba014decc39d3c9250db32152e6ef9..0c1056610ea2f056a463e3848129ea5d071694eb 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1210,7 +1210,6 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) if (err) goto out_err; - return 0; out_err: @@ -1309,6 +1308,8 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, domain->type = type; /* Assume all sizes by default; the driver may override this later */ domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; + domain->is_debug_domain = false; + memset(domain->name, 0, IOMMU_DOMAIN_NAME_LEN); return domain; } @@ -1337,8 +1338,14 @@ static int __iommu_attach_device(struct iommu_domain *domain, return -ENODEV; ret = domain->ops->attach_dev(domain, dev); - if (!ret) + if (!ret) { trace_attach_device_to_domain(dev); + + if (!strnlen(domain->name, IOMMU_DOMAIN_NAME_LEN)) { + strlcpy(domain->name, dev_name(dev), + IOMMU_DOMAIN_NAME_LEN); + } + } return ret; } @@ -1445,9 +1452,6 @@ static int __iommu_attach_group(struct iommu_domain *domain, { int ret; - if (group->default_domain && group->domain != group->default_domain) - return -EBUSY; - ret = __iommu_group_for_each_dev(group, domain, iommu_group_do_attach_device); if (ret == 0) @@ -1477,28 +1481,18 @@ static int iommu_group_do_detach_device(struct device *dev, void *data) return 0; } +/* + * Although upstream implements detaching 
the default_domain as a noop, + * the "SID switch" secure usecase require complete removal of SIDS/SMRS + * from HLOS iommu registers. + */ static void __iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) { - int ret; - - if (!group->default_domain) { - __iommu_group_for_each_dev(group, domain, + __iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device); - group->domain = NULL; - return; - } - - if (group->domain == group->default_domain) - return; - - /* Detach by re-attaching to the default domain */ - ret = __iommu_group_for_each_dev(group, group->default_domain, - iommu_group_do_attach_device); - if (ret != 0) - WARN_ON(1); - else - group->domain = group->default_domain; + group->domain = NULL; + return; } void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) @@ -1518,8 +1512,34 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) } EXPORT_SYMBOL_GPL(iommu_iova_to_phys); -static size_t iommu_pgsize(struct iommu_domain *domain, - unsigned long addr_merge, size_t size) +phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain, + dma_addr_t iova) +{ + if (unlikely(domain->ops->iova_to_phys_hard == NULL)) + return 0; + + return domain->ops->iova_to_phys_hard(domain, iova); +} + +uint64_t iommu_iova_to_pte(struct iommu_domain *domain, + dma_addr_t iova) +{ + if (unlikely(domain->ops->iova_to_pte == NULL)) + return 0; + + return domain->ops->iova_to_pte(domain, iova); +} + +bool iommu_is_iova_coherent(struct iommu_domain *domain, dma_addr_t iova) +{ + if (unlikely(domain->ops->is_iova_coherent == NULL)) + return 0; + + return domain->ops->is_iova_coherent(domain, iova); +} + +size_t iommu_pgsize(unsigned long pgsize_bitmap, + unsigned long addr_merge, size_t size) { unsigned int pgsize_idx; size_t pgsize; @@ -1538,10 +1558,14 @@ static size_t iommu_pgsize(struct iommu_domain *domain, pgsize = (1UL << (pgsize_idx + 1)) - 1; /* throw away page sizes not supported by the hardware */ - pgsize &= domain->pgsize_bitmap; + pgsize &= pgsize_bitmap; /* make sure we're still sane */ - BUG_ON(!pgsize); + if (!pgsize) { + pr_err("invalid pgsize/addr/size! 0x%lx 0x%lx 0x%zx\n", + pgsize_bitmap, addr_merge, size); + BUG(); + } /* pick the biggest page */ pgsize_idx = __fls(pgsize); @@ -1583,7 +1607,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); while (size) { - size_t pgsize = iommu_pgsize(domain, iova | paddr, size); + size_t pgsize = iommu_pgsize(domain->pgsize_bitmap, + iova | paddr, size); pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", iova, &paddr, pgsize); @@ -1601,7 +1626,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, if (ret) iommu_unmap(domain, orig_iova, orig_size - size); else - trace_map(orig_iova, orig_paddr, orig_size); + trace_map(domain, orig_iova, orig_paddr, orig_size, prot); return ret; } @@ -1644,14 +1669,14 @@ static size_t __iommu_unmap(struct iommu_domain *domain, * or we hit an area that isn't mapped. 
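The reworked iommu_pgsize() now takes the supported-page-size bitmap directly instead of the domain, but the selection rule is unchanged: use the largest power-of-two page that fits the remaining length, does not exceed the alignment of the merged address (iova | paddr), and is set in the hardware bitmap. A compact userspace rendering of that rule, with GCC builtins standing in for __fls()/__ffs(); pick_pgsize() is an illustrative name:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Pick the largest hardware-supported page size that fits both the
 * remaining length and the alignment of addr_merge (typically
 * iova | paddr), mirroring the selection in iommu_pgsize().
 */
static size_t pick_pgsize(unsigned long pgsize_bitmap,
                          unsigned long addr_merge, size_t size)
{
        unsigned int pgsize_idx;
        unsigned long pgsize;

        /* Largest page size that still fits into 'size' (__fls). */
        pgsize_idx = 8 * sizeof(unsigned long) - 1 - __builtin_clzl(size);

        /* Respect the alignment of the merged address, if any (__ffs). */
        if (addr_merge) {
                unsigned int align_idx = __builtin_ctzl(addr_merge);

                if (align_idx < pgsize_idx)
                        pgsize_idx = align_idx;
        }

        /* All candidate sizes up to and including 2^pgsize_idx ... */
        pgsize = (1UL << (pgsize_idx + 1)) - 1;

        /* ... intersected with what the hardware supports. */
        pgsize &= pgsize_bitmap;
        if (!pgsize) {
                fprintf(stderr, "no usable page size\n");
                exit(1);
        }

        /* Pick the biggest remaining candidate. */
        pgsize_idx = 8 * sizeof(unsigned long) - 1 - __builtin_clzl(pgsize);
        return (size_t)1 << pgsize_idx;
}

int main(void)
{
        /* 4K | 2M | 1G supported; 6 MiB of work at a 2 MiB-aligned address. */
        unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);

        printf("pgsize = 0x%zx\n", pick_pgsize(bitmap, 0x40200000UL, 0x600000));
        return 0;
}

With 4K, 2M and 1G supported, a 6 MiB run starting at a 2 MiB-aligned address resolves to 2 MiB, which is the page size iommu_map() would use on its first iteration before recomputing for the remainder.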
*/ while (unmapped < size) { - size_t pgsize = iommu_pgsize(domain, iova, size - unmapped); + size_t left = size - unmapped; - unmapped_page = ops->unmap(domain, iova, pgsize); + unmapped_page = ops->unmap(domain, iova, left); if (!unmapped_page) break; if (sync && ops->iotlb_range_add) - ops->iotlb_range_add(domain, iova, pgsize); + ops->iotlb_range_add(domain, iova, left); pr_debug("unmapped: iova 0x%lx size 0x%zx\n", iova, unmapped_page); @@ -1663,7 +1688,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain, if (sync && ops->iotlb_sync) ops->iotlb_sync(domain); - trace_unmap(orig_iova, size, unmapped); + trace_unmap(domain, orig_iova, size, unmapped); return unmapped; } @@ -1681,7 +1706,19 @@ size_t iommu_unmap_fast(struct iommu_domain *domain, } EXPORT_SYMBOL_GPL(iommu_unmap_fast); -size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, +size_t iommu_map_sg(struct iommu_domain *domain, + unsigned long iova, struct scatterlist *sg, + unsigned int nents, int prot) +{ + size_t mapped; + + mapped = domain->ops->map_sg(domain, iova, sg, nents, prot); + trace_map_sg(domain, iova, mapped, prot); + return mapped; +} +EXPORT_SYMBOL(iommu_map_sg); + +size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot) { struct scatterlist *s; @@ -1722,7 +1759,7 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, return 0; } -EXPORT_SYMBOL_GPL(iommu_map_sg); +EXPORT_SYMBOL_GPL(default_iommu_map_sg); int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot) @@ -1786,6 +1823,9 @@ int report_iommu_fault(struct iommu_domain *domain, struct device *dev, } EXPORT_SYMBOL_GPL(report_iommu_fault); +struct dentry *iommu_debugfs_top; +EXPORT_SYMBOL_GPL(iommu_debugfs_top); + static int __init iommu_init(void) { iommu_group_kset = kset_create_and_add("iommu_groups", @@ -1879,6 +1919,21 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list) ops->put_resv_regions(dev, list); } +/** + * iommu_trigger_fault() - trigger an IOMMU fault + * @domain: iommu domain + * + * Triggers a fault on the device to which this domain is attached. + * + * This function should only be used for debugging purposes, for obvious + * reasons. 
+ */ +void iommu_trigger_fault(struct iommu_domain *domain, unsigned long flags) +{ + if (domain->ops->trigger_fault) + domain->ops->trigger_fault(domain, flags); +} + struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, enum iommu_resv_type type) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 34c058c24b9d2e81cdd56da883f5109a9df06354..19259ae3346c38c0b8d0a5ef96393a8cd8fb95a7 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -61,6 +61,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR; rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node); rb_insert_color(&iovad->anchor.node, &iovad->rbroot); + iovad->best_fit = false; init_iova_rcaches(iovad); } EXPORT_SYMBOL_GPL(init_iova_domain); @@ -186,6 +187,24 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova, rb_insert_color(&iova->node, root); } +#ifdef CONFIG_ARM64_DMA_IOMMU_ALIGNMENT +static unsigned long limit_align(struct iova_domain *iovad, + unsigned long shift) +{ + unsigned long max; + + max = CONFIG_ARM64_DMA_IOMMU_ALIGNMENT + PAGE_SHIFT + - iova_shift(iovad); + return min(shift, max); +} +#else +static unsigned long limit_align(struct iova_domain *iovad, + unsigned long shift) +{ + return shift; +} +#endif + static int __alloc_and_insert_iova_range(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, struct iova *new, bool size_aligned) @@ -197,7 +216,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, unsigned long align_mask = ~0UL; if (size_aligned) - align_mask <<= fls_long(size - 1); + align_mask <<= limit_align(iovad, fls_long(size - 1)); /* Walk the tree backwards */ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); @@ -230,6 +249,69 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, return 0; } +static int __alloc_and_insert_iova_best_fit(struct iova_domain *iovad, + unsigned long size, unsigned long limit_pfn, + struct iova *new, bool size_aligned) +{ + struct rb_node *curr, *prev; + struct iova *curr_iova, *prev_iova; + unsigned long flags; + unsigned long align_mask = ~0UL; + struct rb_node *candidate_rb_parent; + unsigned long new_pfn, candidate_pfn = ~0UL; + unsigned long gap, candidate_gap = ~0UL; + + if (size_aligned) + align_mask <<= limit_align(iovad, fls_long(size - 1)); + + /* Walk the tree backwards */ + spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); + curr = &iovad->anchor.node; + prev = rb_prev(curr); + for (; prev; curr = prev, prev = rb_prev(curr)) { + curr_iova = rb_entry(curr, struct iova, node); + prev_iova = rb_entry(prev, struct iova, node); + + limit_pfn = min(limit_pfn, curr_iova->pfn_lo); + new_pfn = (limit_pfn - size) & align_mask; + gap = curr_iova->pfn_lo - prev_iova->pfn_hi - 1; + if ((limit_pfn >= size) && (new_pfn > prev_iova->pfn_hi) + && (gap < candidate_gap)) { + candidate_gap = gap; + candidate_pfn = new_pfn; + candidate_rb_parent = curr; + if (gap == size) + goto insert; + } + } + + curr_iova = rb_entry(curr, struct iova, node); + limit_pfn = min(limit_pfn, curr_iova->pfn_lo); + new_pfn = (limit_pfn - size) & align_mask; + gap = curr_iova->pfn_lo - iovad->start_pfn; + if (limit_pfn >= size && new_pfn >= iovad->start_pfn && + gap < candidate_gap) { + candidate_gap = gap; + candidate_pfn = new_pfn; + candidate_rb_parent = curr; + } + +insert: + if (candidate_pfn == ~0UL) { + spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); + return -ENOMEM; + } + + /* 
pfn_lo will point to size aligned address if size_aligned is set */ + new->pfn_lo = candidate_pfn; + new->pfn_hi = new->pfn_lo + size - 1; + + /* If we have 'prev', it's a valid place to start the insertion. */ + iova_insert_rbtree(&iovad->rbroot, new, candidate_rb_parent); + spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); + return 0; +} + static struct kmem_cache *iova_cache; static unsigned int iova_cache_users; static DEFINE_MUTEX(iova_cache_mutex); @@ -305,8 +387,13 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, if (!new_iova) return NULL; - ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1, - new_iova, size_aligned); + if (iovad->best_fit) { + ret = __alloc_and_insert_iova_best_fit(iovad, size, + limit_pfn + 1, new_iova, size_aligned); + } else { + ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1, + new_iova, size_aligned); + } if (ret) { free_iova_mem(new_iova); diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 9f947ea3e48d2ede9be65688f4b9eb945db35f7f..c7e0ab243456f245984f9046b8f338f4b2e4137b 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -797,8 +797,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) qcom_iommu->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res) + if (res) { qcom_iommu->local_base = devm_ioremap_resource(dev, res); + if (IS_ERR(qcom_iommu->local_base)) + return PTR_ERR(qcom_iommu->local_base); + } qcom_iommu->iface_clk = devm_clk_get(dev, "iface"); if (IS_ERR(qcom_iommu->iface_clk)) { diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index f9b73336a39eee92fc8f600914809446234997bb..fe7d63cdfb1d78727f669e1077a48be071a1e079 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -2858,12 +2858,18 @@ static int its_vpe_set_irqchip_state(struct irq_data *d, return 0; } +static int its_vpe_retrigger(struct irq_data *d) +{ + return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + static struct irq_chip its_vpe_irq_chip = { .name = "GICv4-vpe", .irq_mask = its_vpe_mask_irq, .irq_unmask = its_vpe_unmask_irq, .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = its_vpe_set_affinity, + .irq_retrigger = its_vpe_retrigger, .irq_set_irqchip_state = its_vpe_set_irqchip_state, .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, }; diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index f7fdbf5d183b9ced6518238dd021760dcb8410d8..c98358be0bc8b6a4ef0fef7ab84ac531f7fc8ec4 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -231,10 +231,16 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain, return 0; } +static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + platform_msi_domain_free(domain, virq, nr_irqs); +} + static const struct irq_domain_ops mbigen_domain_ops = { .translate = mbigen_domain_translate, .alloc = mbigen_irq_domain_alloc, - .free = irq_domain_free_irqs_common, + .free = mbigen_irq_domain_free, }; static int mbigen_of_create_domain(struct platform_device *pdev, diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index 928858dada756255cbb4c27c2ae0c191373e6513..f1386733d3bc1ded71f0c7d6ef1bf6692b1cdba1 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -68,12 +69,16 @@ static 
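__alloc_and_insert_iova_best_fit() walks the tree from the top of the address space downwards and, for every gap between neighbouring allocations, keeps the candidate whose gap has the least slack while still holding a size-aligned allocation below limit_pfn; an exact fit ends the walk early. A simplified userspace sketch of the same selection policy over a sorted array instead of an rbtree, working in PFNs throughout; it only picks the start PFN and leaves insertion to the caller (struct range and alloc_best_fit() are illustrative names):

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t lo, hi; };      /* allocated PFN range, inclusive */

/*
 * Best-fit selection: among all gaps between existing allocations that can
 * hold 'size' PFNs below 'limit', pick the gap with the least slack.
 * 'ranges' is sorted by lo and its last entry plays the role of the iova
 * anchor node at the top of the space.  Returns the start PFN or UINT64_MAX.
 */
static uint64_t alloc_best_fit(const struct range *ranges, int nr,
                               uint64_t size, uint64_t limit,
                               uint64_t start_pfn, uint64_t align_mask)
{
        uint64_t best_pfn = UINT64_MAX, best_gap = UINT64_MAX;
        int i;

        /* Walk from the highest-addressed allocation downwards. */
        for (i = nr - 1; i > 0; i--) {
                const struct range *curr = &ranges[i], *prev = &ranges[i - 1];
                uint64_t gap = curr->lo - prev->hi - 1;
                uint64_t new_pfn;

                if (limit > curr->lo)
                        limit = curr->lo;
                new_pfn = (limit - size) & align_mask;
                if (limit >= size && new_pfn > prev->hi && gap < best_gap) {
                        best_gap = gap;
                        best_pfn = new_pfn;
                        if (gap == size)        /* exact fit, stop early */
                                return best_pfn;
                }
        }

        /* Finally consider the region below the lowest allocation. */
        if (limit > ranges[0].lo)
                limit = ranges[0].lo;
        if (limit >= size) {
                uint64_t new_pfn = (limit - size) & align_mask;
                uint64_t gap = ranges[0].lo - start_pfn;

                if (new_pfn >= start_pfn && gap < best_gap)
                        best_pfn = new_pfn;
        }
        return best_pfn;
}

int main(void)
{
        /* Two busy ranges plus an anchor entry at the top of the space. */
        struct range busy[] = {
                { 0x100, 0x1ff }, { 0x800, 0x8ff }, { 0xfffff, 0xfffff },
        };
        uint64_t pfn = alloc_best_fit(busy, 3, 0x100, 0xfffff, 0, ~0ULL);

        printf("allocated at PFN 0x%llx\n", (unsigned long long)pfn);
        return 0;
}

In this example the exact-size region below the lowest allocation wins, which is the fragmentation-avoiding behaviour the new best_fit flag is meant to provide.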
void fpga_irq_unmask(struct irq_data *d) static void fpga_irq_handle(struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); struct fpga_irq_data *f = irq_desc_get_handler_data(desc); - u32 status = readl(f->base + IRQ_STATUS); + u32 status; + + chained_irq_enter(chip, desc); + status = readl(f->base + IRQ_STATUS); if (status == 0) { do_bad_IRQ(desc); - return; + goto out; } do { @@ -82,6 +87,9 @@ static void fpga_irq_handle(struct irq_desc *desc) status &= ~(1 << irq); generic_handle_irq(irq_find_mapping(f->domain, irq)); } while (status); + +out: + chained_irq_exit(chip, desc); } /* @@ -204,6 +212,9 @@ int __init fpga_irq_of_init(struct device_node *node, if (of_property_read_u32(node, "valid-mask", &valid_mask)) valid_mask = 0; + writel(clear_mask, base + IRQ_ENABLE_CLEAR); + writel(clear_mask, base + FIQ_ENABLE_CLEAR); + /* Some chips are cascaded from a parent IRQ */ parent_irq = irq_of_parse_and_map(node, 0); if (!parent_irq) { @@ -213,9 +224,6 @@ int __init fpga_irq_of_init(struct device_node *node, fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); - writel(clear_mask, base + IRQ_ENABLE_CLEAR); - writel(clear_mask, base + FIQ_ENABLE_CLEAR); - /* * On Versatile AB/PB, some secondary interrupts have a direct * pass-thru to the primary controller for IRQs 20 and 22-31 which need diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index 055c90b8253cbe37749cb94338ef45b01574056b..db5aa2905d8d03125454e702d47c7640c89abcc7 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -53,7 +53,7 @@ static int add_to_rbuf(struct mbox_chan *chan, void *mssg) return idx; } -static void msg_submit(struct mbox_chan *chan) +static int __msg_submit(struct mbox_chan *chan) { unsigned count, idx; unsigned long flags; @@ -85,6 +85,24 @@ static void msg_submit(struct mbox_chan *chan) exit: spin_unlock_irqrestore(&chan->lock, flags); + return err; +} + +static void msg_submit(struct mbox_chan *chan) +{ + int err = 0; + + /* + * If the controller returns -EAGAIN, then it means, our spinlock + * here is preventing the controller from receiving its interrupt, + * that would help clear the controller channels that are currently + * blocked waiting on the interrupt response. + * Retry again. 
+ */ + do { + err = __msg_submit(chan); + } while (err == -EAGAIN); + if (!err && (chan->txdone_method & TXDONE_BY_POLL)) /* kick start the timer immediately to avoid delays */ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); diff --git a/drivers/md/dm-bow.c b/drivers/md/dm-bow.c index 75fc7dc83d7b9c2e0b5c29455c192ec99ac21806..457b26c609a71687fdb751a79c3b18dc89cdaa67 100644 --- a/drivers/md/dm-bow.c +++ b/drivers/md/dm-bow.c @@ -657,6 +657,7 @@ static int dm_bow_ctr(struct dm_target *ti, unsigned int argc, char **argv) bc->dev->bdev->bd_queue->limits.max_discard_sectors = 1 << 15; bc->forward_trims = false; } else { + bc->dev->bdev->bd_queue->limits.discard_granularity = 1 << 12; bc->forward_trims = true; } diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index 73120426a1a0590daa3bdaac661af223152f49f7..0b10ab35fa71685c78b31070f6249697dfe09d20 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -38,6 +38,7 @@ static const struct dm_default_key_cipher { * @sector_size: crypto sector size in bytes (usually 4096) * @sector_bits: log2(sector_size) * @key: the encryption key to use + * @max_dun: the maximum DUN that may be used (computed from other params) */ struct default_key_c { struct dm_dev *dev; @@ -48,6 +49,7 @@ struct default_key_c { unsigned int sector_bits; struct blk_crypto_key key; bool is_hw_wrapped; + u64 max_dun; }; static const struct dm_default_key_cipher * @@ -147,6 +149,7 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) const struct dm_default_key_cipher *cipher; u8 raw_key[DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE]; unsigned int raw_key_size; + unsigned int dun_bytes; unsigned long long tmpll; char dummy; int err; @@ -230,16 +233,20 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - err = blk_crypto_init_key(&dkc->key, raw_key, cipher->key_size, + dkc->max_dun = (dkc->iv_offset + ti->len - 1) >> + (dkc->sector_bits - SECTOR_SHIFT); + dun_bytes = DIV_ROUND_UP(fls64(dkc->max_dun), 8); + + err = blk_crypto_init_key(&dkc->key, raw_key, raw_key_size, dkc->is_hw_wrapped, cipher->mode_num, - dkc->sector_size); + dun_bytes, dkc->sector_size); if (err) { ti->error = "Error initializing blk-crypto key"; goto bad; } - err = blk_crypto_start_using_mode(cipher->mode_num, dkc->sector_size, - dkc->is_hw_wrapped, + err = blk_crypto_start_using_mode(cipher->mode_num, dun_bytes, + dkc->sector_size, dkc->is_hw_wrapped, dkc->dev->bdev->bd_queue); if (err) { ti->error = "Error starting to use blk-crypto"; @@ -300,6 +307,13 @@ static int default_key_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_KILL; dun[0] >>= dkc->sector_bits - SECTOR_SHIFT; /* crypto sectors */ + /* + * This check isn't necessary as we should have calculated max_dun + * correctly, but be safe. + */ + if (WARN_ON_ONCE(dun[0] > dkc->max_dun)) + return DM_MAPIO_KILL; + bio_crypt_set_ctx(bio, &dkc->key, dun, GFP_NOIO); return DM_MAPIO_REMAPPED; diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index def4f6ec290bef28fb295f077b12c8b193ec3aea..207ca0ad0b59df3f19009d569ac1df9a0c738766 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -586,10 +586,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) /* Do we need to select a new pgpath? 
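The dm-default-key change sizes the key for the largest data unit number the target can ever emit: the last 512-byte sector it covers (plus iv_offset), shifted down to crypto-sector granularity, and the DUN byte count then follows from fls64() of that value. A small worked computation under the same formula, assuming 512-byte logical sectors and 4096-byte crypto sectors:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT    9       /* 512-byte logical sectors */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* fls64(): number of bits needed to represent x (0 for x == 0). */
static unsigned int fls64_bits(uint64_t x)
{
        return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
        uint64_t iv_offset = 0;                 /* optional IV offset, in sectors */
        uint64_t ti_len = 1ULL << 31;           /* 1 TiB target: 2^31 sectors */
        unsigned int sector_bits = 12;          /* 4096-byte crypto sectors */

        /* Highest DUN this target can ever generate ... */
        uint64_t max_dun = (iv_offset + ti_len - 1) >> (sector_bits - SECTOR_SHIFT);
        /* ... and the number of bytes needed to carry it. */
        unsigned int dun_bytes = DIV_ROUND_UP(fls64_bits(max_dun), 8);

        printf("max_dun = %" PRIu64 ", dun_bytes = %u\n", max_dun, dun_bytes);
        return 0;
}

For a 1 TiB target this gives max_dun = 2^28 - 1 and dun_bytes = 4, so the key only needs 32-bit DUN support.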
*/ pgpath = READ_ONCE(m->current_pgpath); - queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); - if (!pgpath || !queue_io) + if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) pgpath = choose_pgpath(m, bio->bi_iter.bi_size); + /* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */ + queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); + if ((pgpath && queue_io) || (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) { /* Queue for the daemon to resubmit */ diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 684af08d07478cb4652ad6642fc99d9bc3be189f..bb8327999705ee683760f4153ab9d039e1958f97 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -436,7 +436,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, fio->level++; if (type == DM_VERITY_BLOCK_TYPE_METADATA) - block += v->data_blocks; + block = block - v->hash_start + v->data_blocks; /* * For RS(M, N), the continuous FEC data is divided into blocks of N @@ -552,6 +552,7 @@ void verity_fec_dtr(struct dm_verity *v) mempool_exit(&f->rs_pool); mempool_exit(&f->prealloc_pool); mempool_exit(&f->extra_pool); + mempool_exit(&f->output_pool); kmem_cache_destroy(f->cache); if (f->data_bufio) diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 4e4a09054f858ee3824c9457a8c2d0e543d41fcc..4321c48eba6b9961fa088475c53c901d41df2e50 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -878,11 +878,30 @@ static int writecache_alloc_entries(struct dm_writecache *wc) struct wc_entry *e = &wc->entries[b]; e->index = b; e->write_in_progress = false; + cond_resched(); } return 0; } +static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors) +{ + struct dm_io_region region; + struct dm_io_request req; + + region.bdev = wc->ssd_dev->bdev; + region.sector = wc->start_sector; + region.count = n_sectors; + req.bi_op = REQ_OP_READ; + req.bi_op_flags = REQ_SYNC; + req.mem.type = DM_IO_VMA; + req.mem.ptr.vma = (char *)wc->memory_map; + req.client = wc->dm_io; + req.notify.fn = NULL; + + return dm_io(&req, 1, ®ion, NULL); +} + static void writecache_resume(struct dm_target *ti) { struct dm_writecache *wc = ti->private; @@ -893,8 +912,18 @@ static void writecache_resume(struct dm_target *ti) wc_lock(wc); - if (WC_MODE_PMEM(wc)) + if (WC_MODE_PMEM(wc)) { persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size); + } else { + r = writecache_read_metadata(wc, wc->metadata_sectors); + if (r) { + size_t sb_entries_offset; + writecache_error(wc, r, "unable to read metadata: %d", r); + sb_entries_offset = offsetof(struct wc_memory_superblock, entries); + memset((char *)wc->memory_map + sb_entries_offset, -1, + (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset); + } + } wc->tree = RB_ROOT; INIT_LIST_HEAD(&wc->lru); @@ -932,6 +961,7 @@ static void writecache_resume(struct dm_target *ti) e->original_sector = le64_to_cpu(wme.original_sector); e->seq_count = le64_to_cpu(wme.seq_count); } + cond_resched(); } #endif for (b = 0; b < wc->n_blocks; b++) { @@ -1764,8 +1794,10 @@ static int init_memory(struct dm_writecache *wc) pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks)); pmem_assign(sb(wc)->seq_count, cpu_to_le64(0)); - for (b = 0; b < wc->n_blocks; b++) + for (b = 0; b < wc->n_blocks; b++) { write_original_sector_seq_count(wc, &wc->entries[b], -1, -1); + cond_resched(); + } writecache_flush_all_metadata(wc); writecache_commit_flushed(wc, false); @@ -1974,6 +2006,12 @@ static int writecache_ctr(struct 
dm_target *ti, unsigned argc, char **argv) ti->error = "Invalid block size"; goto bad; } + if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) || + wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) { + r = -EINVAL; + ti->error = "Block size is smaller than device logical block size"; + goto bad; + } wc->block_size_bits = __ffs(wc->block_size); wc->max_writeback_jobs = MAX_WRITEBACK_JOBS; @@ -2062,8 +2100,6 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } } else { - struct dm_io_region region; - struct dm_io_request req; size_t n_blocks, n_metadata_blocks; uint64_t n_bitmap_bits; @@ -2120,19 +2156,9 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } - region.bdev = wc->ssd_dev->bdev; - region.sector = wc->start_sector; - region.count = wc->metadata_sectors; - req.bi_op = REQ_OP_READ; - req.bi_op_flags = REQ_SYNC; - req.mem.type = DM_IO_VMA; - req.mem.ptr.vma = (char *)wc->memory_map; - req.client = wc->dm_io; - req.notify.fn = NULL; - - r = dm_io(&req, 1, ®ion, NULL); + r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT); if (r) { - ti->error = "Unable to read metadata"; + ti->error = "Unable to read first block of metadata"; goto bad; } } diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 086a870087cff81ff7fe57b8311a7e10000f6b9b..53eb21343b11f55ff736a747211f9661c6992822 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -1105,7 +1105,6 @@ static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone, if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) { set_bit(DMZ_RND, &zone->flags); - zmd->nr_rnd_zones++; } else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ || blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) { set_bit(DMZ_SEQ, &zone->flags); diff --git a/drivers/md/md.c b/drivers/md/md.c index 62f214d43e15a359b668910db4cc9a3856be3432..9426976e0860d6386dad898a20c53db97a3d3444 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5874,7 +5874,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes); static void mddev_detach(struct mddev *mddev) { md_bitmap_wait_behind_writes(mddev); - if (mddev->pers && mddev->pers->quiesce) { + if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) { mddev->pers->quiesce(mddev, 1); mddev->pers->quiesce(mddev, 0); } diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c index 5d107c53364d64249374431d7c1a236ead06e4b4..be242bb8fefcdfb0b81addb3d894b2a075decbd6 100644 --- a/drivers/media/i2c/ov5695.c +++ b/drivers/media/i2c/ov5695.c @@ -974,16 +974,9 @@ static int ov5695_s_stream(struct v4l2_subdev *sd, int on) return ret; } -/* Calculate the delay in us by clock rate and clock cycles */ -static inline u32 ov5695_cal_delay(u32 cycles) -{ - return DIV_ROUND_UP(cycles, OV5695_XVCLK_FREQ / 1000 / 1000); -} - static int __ov5695_power_on(struct ov5695 *ov5695) { - int ret; - u32 delay_us; + int i, ret; struct device *dev = &ov5695->client->dev; ret = clk_prepare_enable(ov5695->xvclk); @@ -994,21 +987,28 @@ static int __ov5695_power_on(struct ov5695 *ov5695) gpiod_set_value_cansleep(ov5695->reset_gpio, 1); - ret = regulator_bulk_enable(OV5695_NUM_SUPPLIES, ov5695->supplies); - if (ret < 0) { - dev_err(dev, "Failed to enable regulators\n"); - goto disable_clk; + /* + * The hardware requires the regulators to be powered on in order, + * so enable them one by one. 
+ */ + for (i = 0; i < OV5695_NUM_SUPPLIES; i++) { + ret = regulator_enable(ov5695->supplies[i].consumer); + if (ret) { + dev_err(dev, "Failed to enable %s: %d\n", + ov5695->supplies[i].supply, ret); + goto disable_reg_clk; + } } gpiod_set_value_cansleep(ov5695->reset_gpio, 0); - /* 8192 cycles prior to first SCCB transaction */ - delay_us = ov5695_cal_delay(8192); - usleep_range(delay_us, delay_us * 2); + usleep_range(1000, 1200); return 0; -disable_clk: +disable_reg_clk: + for (--i; i >= 0; i--) + regulator_disable(ov5695->supplies[i].consumer); clk_disable_unprepare(ov5695->xvclk); return ret; @@ -1016,9 +1016,22 @@ static int __ov5695_power_on(struct ov5695 *ov5695) static void __ov5695_power_off(struct ov5695 *ov5695) { + struct device *dev = &ov5695->client->dev; + int i, ret; + clk_disable_unprepare(ov5695->xvclk); gpiod_set_value_cansleep(ov5695->reset_gpio, 1); - regulator_bulk_disable(OV5695_NUM_SUPPLIES, ov5695->supplies); + + /* + * The hardware requires the regulators to be powered off in order, + * so disable them one by one. + */ + for (i = OV5695_NUM_SUPPLIES - 1; i >= 0; i--) { + ret = regulator_disable(ov5695->supplies[i].consumer); + if (ret) + dev_err(dev, "Failed to disable %s: %d\n", + ov5695->supplies[i].supply, ret); + } } static int __maybe_unused ov5695_runtime_resume(struct device *dev) @@ -1288,7 +1301,7 @@ static int ov5695_probe(struct i2c_client *client, if (clk_get_rate(ov5695->xvclk) != OV5695_XVCLK_FREQ) dev_warn(dev, "xvclk mismatched, modes are based on 24MHz\n"); - ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + ov5695->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ov5695->reset_gpio)) { dev_err(dev, "Failed to get reset-gpios\n"); return -EINVAL; diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c index f27d294dcbef5faf543c8c04147431df2b8e9121..dd50acc085d89f919bf86170190e94b8f3cac472 100644 --- a/drivers/media/i2c/video-i2c.c +++ b/drivers/media/i2c/video-i2c.c @@ -105,7 +105,7 @@ static int amg88xx_xfer(struct video_i2c_data *data, char *buf) return (ret == 2) ? 
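The ov5695 rework drops regulator_bulk_enable() so the supplies come up strictly in array order, and on failure it powers the already-enabled ones back down in reverse. A generic sketch of that enable-with-unwind pattern, with printf stand-ins for regulator_enable()/regulator_disable(); power_on(), enable_supply() and disable_supply() are illustrative names:

#include <stdio.h>

#define NUM_SUPPLIES 3

/* Stand-ins for regulator_enable()/regulator_disable(). */
static int enable_supply(int i)
{
        printf("enable  supply %d\n", i);
        return i == 2 ? -1 : 0;         /* simulate the last supply failing */
}

static void disable_supply(int i)
{
        printf("disable supply %d\n", i);
}

/*
 * Power the supplies on strictly in order; on failure, power off the ones
 * that already came up, in reverse order, and report the error.
 */
static int power_on(void)
{
        int i, ret;

        for (i = 0; i < NUM_SUPPLIES; i++) {
                ret = enable_supply(i);
                if (ret)
                        goto unwind;
        }
        return 0;

unwind:
        for (--i; i >= 0; i--)
                disable_supply(i);
        return ret;
}

int main(void)
{
        return power_on() ? 1 : 0;
}

Keeping the index signed matters here: both this unwind and the reverse loop in __ov5695_power_off() terminate when i drops below zero.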
0 : -EIO; } -#if IS_ENABLED(CONFIG_HWMON) +#if IS_REACHABLE(CONFIG_HWMON) static const u32 amg88xx_temp_config[] = { HWMON_T_INPUT, diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c index 2293d936e49ca217883f5ee6d494f6469fc83a24..7f515a4b9bd1257152f87c3b17ec1cb046540aed 100644 --- a/drivers/media/platform/qcom/venus/hfi_parser.c +++ b/drivers/media/platform/qcom/venus/hfi_parser.c @@ -181,6 +181,7 @@ static void parse_codecs(struct venus_core *core, void *data) if (IS_V1(core)) { core->dec_codecs &= ~HFI_VIDEO_CODEC_HEVC; core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK; + core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC; } } diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c index d1febe5baa6dd9b45c2482e1e2d32c373bbd355f..be3155275a6bac10b2f20a6054d8e0f8c5df9485 100644 --- a/drivers/media/platform/ti-vpe/cal.c +++ b/drivers/media/platform/ti-vpe/cal.c @@ -541,16 +541,16 @@ static void enable_irqs(struct cal_ctx *ctx) static void disable_irqs(struct cal_ctx *ctx) { + u32 val; + /* Disable IRQ_WDMA_END 0/1 */ - reg_write_field(ctx->dev, - CAL_HL_IRQENABLE_CLR(2), - CAL_HL_IRQ_CLEAR, - CAL_HL_IRQ_MASK(ctx->csi2_port)); + val = 0; + set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port)); + reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(2), val); /* Disable IRQ_WDMA_START 0/1 */ - reg_write_field(ctx->dev, - CAL_HL_IRQENABLE_CLR(3), - CAL_HL_IRQ_CLEAR, - CAL_HL_IRQ_MASK(ctx->csi2_port)); + val = 0; + set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port)); + reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(3), val); /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */ reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0); } diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index f862f1b7f99657c6cff3a816a68bc853d2d01033..d6f5f5b3f75f247897acee52446003713854f3da 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -29,7 +29,7 @@ #include "rc-core-priv.h" #include -#define LIRCBUF_SIZE 256 +#define LIRCBUF_SIZE 1024 static dev_t lirc_base_dev; diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 7c80d9b38d609af8fcd53c9674165a7170b2fae6..eadf673c442cfde249abbd4273877d4fd53fb3e0 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1295,6 +1295,14 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_META_FMT_VSP1_HGO: descr = "R-Car VSP1 1-D Histogram"; break; case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break; case V4L2_META_FMT_UVC: descr = "UVC payload header metadata"; break; + case V4L2_PIX_FMT_NV12_UBWC: + descr = "NV12 UBWC"; break; + case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS: + descr = "Y/CbCr 4:2:0 P10 Venus"; break; + case V4L2_PIX_FMT_NV12_TP10_UBWC: + descr = "Y/CbCr 4:2:0 TP10 UBWC"; break; + case V4L2_PIX_FMT_NV12_512: + descr = "Y/CbCr 4:2:0 (512 align)"; break; default: /* Compressed formats */ diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c index 1476465ce803b1fbe19bf8fc218fb3d747188d03..6ea0dd37b4535a1497313d05c60d4ee90598c4ed 100644 --- a/drivers/mfd/dln2.c +++ b/drivers/mfd/dln2.c @@ -93,6 +93,11 @@ struct dln2_mod_rx_slots { spinlock_t lock; }; +enum dln2_endpoint { + DLN2_EP_OUT = 0, + DLN2_EP_IN = 1, +}; + struct dln2_dev { struct usb_device *usb_dev; struct usb_interface *interface; @@ -736,10 +741,10 @@ static int dln2_probe(struct usb_interface *interface, hostif->desc.bNumEndpoints < 2) return -ENODEV; - epin = 
&hostif->endpoint[0].desc; - epout = &hostif->endpoint[1].desc; + epout = &hostif->endpoint[DLN2_EP_OUT].desc; if (!usb_endpoint_is_bulk_out(epout)) return -ENODEV; + epin = &hostif->endpoint[DLN2_EP_IN].desc; if (!usb_endpoint_is_bulk_in(epin)) return -ENODEV; diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 95e217e6b6d757e2e5b1621bac2be269387dec54..7577afd4284280ad001e245087d908b5d5f46989 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -397,7 +397,7 @@ int intel_lpss_probe(struct device *dev, if (!lpss) return -ENOMEM; - lpss->priv = devm_ioremap(dev, info->mem->start + LPSS_PRIV_OFFSET, + lpss->priv = devm_ioremap_uc(dev, info->mem->start + LPSS_PRIV_OFFSET, LPSS_PRIV_SIZE); if (!lpss->priv) return -ENOMEM; diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c index 13645be5f3b598c88e6fb977f3063d26dd2939f1..f0e845c8e6a76fdeb76a2a0b5c8df21627a681fd 100644 --- a/drivers/misc/cardreader/rts5227.c +++ b/drivers/misc/cardreader/rts5227.c @@ -369,6 +369,7 @@ static const struct pcr_ops rts522a_pcr_ops = { void rts522a_init_params(struct rtsx_pcr *pcr) { rts5227_init_params(pcr); + pcr->ops = &rts522a_pcr_ops; pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11); pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3; } diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c index 8a5adc0d2e8873f1094c653d01bb12f610c41bab..3ebe5d75ad6a21932d4add792d1b55b765a64d99 100644 --- a/drivers/misc/echo/echo.c +++ b/drivers/misc/echo/echo.c @@ -381,7 +381,7 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx) */ ec->factor = 0; ec->shift = 0; - if ((ec->nonupdate_dwell == 0)) { + if (!ec->nonupdate_dwell) { int p, logp, shift; /* Determine: diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index d80372d21c14f09e6d4abaca70d71957e2ecbe74..2ac1dc5104b7a5dc63859aa07556f51bceb6a511 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -147,6 +147,8 @@ #define MEI_DEV_ID_CMP_H 0x06e0 /* Comet Lake H */ #define MEI_DEV_ID_CMP_H_3 0x06e4 /* Comet Lake H 3 (iTouch) */ +#define MEI_DEV_ID_CDF 0x18D3 /* Cedar Fork */ + #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ #define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 3498c10b8263ba43dffd398ece29dc5e7e4b8f70..b4bf12f27caf5b019f53e90432e4addbce5bb906 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -118,6 +118,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index fd33a3b9c66f6248ae831972b6111ed6e769f310..727dc6ec427dda9b801de108873b7bc27d53bb39 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -104,6 +104,7 @@ struct pci_endpoint_test { struct completion irq_raised; int last_irq; int num_irqs; + int irq_type; /* mutex to protect the ioctls */ struct mutex mutex; struct miscdevice miscdev; @@ -163,6 +164,7 @@ static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test) struct pci_dev *pdev = test->pdev; pci_free_irq_vectors(pdev); + test->irq_type = IRQ_TYPE_UNDEFINED; } static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test, @@ -197,6 +199,8 @@ static bool 
pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test, irq = 0; res = false; } + + test->irq_type = type; test->num_irqs = irq; return res; @@ -336,6 +340,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size) dma_addr_t orig_dst_phys_addr; size_t offset; size_t alignment = test->alignment; + int irq_type = test->irq_type; u32 src_crc32; u32 dst_crc32; @@ -432,6 +437,7 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size) dma_addr_t orig_phys_addr; size_t offset; size_t alignment = test->alignment; + int irq_type = test->irq_type; u32 crc32; if (size > SIZE_MAX - alignment) @@ -500,6 +506,7 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size) dma_addr_t orig_phys_addr; size_t offset; size_t alignment = test->alignment; + int irq_type = test->irq_type; u32 crc32; if (size > SIZE_MAX - alignment) @@ -561,7 +568,7 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test, return false; } - if (irq_type == req_irq_type) + if (test->irq_type == req_irq_type) return true; pci_endpoint_test_release_irq(test); @@ -573,12 +580,10 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test, if (!pci_endpoint_test_request_irq(test)) goto err; - irq_type = req_irq_type; return true; err: pci_endpoint_test_free_irq_vectors(test); - irq_type = IRQ_TYPE_UNDEFINED; return false; } @@ -636,7 +641,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, { int err; int id; - char name[20]; + char name[24]; enum pci_barno bar; void __iomem *base; struct device *dev = &pdev->dev; @@ -655,6 +660,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, test->test_reg_bar = 0; test->alignment = 0; test->pdev = pdev; + test->irq_type = IRQ_TYPE_UNDEFINED; if (no_msi) irq_type = IRQ_TYPE_LEGACY; diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c index f6a6a3e1344e61f0b428a1be5ee6ec882b18a4eb..cb027ed1f8c35ca5415dbd60f0e4b4c8792c48a1 100644 --- a/drivers/misc/uid_sys_stats.c +++ b/drivers/misc/uid_sys_stats.c @@ -126,7 +126,7 @@ static void get_full_task_comm(struct task_entry *task_entry, int i = 0, offset = 0, len = 0; /* save one byte for terminating null character */ int unused_len = MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1; - char buf[unused_len]; + char buf[MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1]; struct mm_struct *mm = task->mm; /* fill the first TASK_COMM_LEN bytes with thread name */ diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index 42e89060cd41ecb766dbc3c4b2c98b6037decae1..ed68de663f6d86dd4764462367b5217266c13de8 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -80,3 +80,11 @@ config MMC_TEST This driver is only of interest to those developing or testing a host driver. Most people should say N here. +config MMC_CRYPTO + bool "MMC Crypto Engine Support" + depends on BLK_INLINE_ENCRYPTION + help + Enable Crypto Engine Support in MMC. + Enabling this makes it possible for the kernel to use the crypto + capabilities of the MMC device (if present) to perform crypto + operations on data being transferred to/from the device. 
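The pci_endpoint_test changes above move the interrupt type from a driver-global into per-device state: releasing the vectors resets it to undefined, and switching types only reallocates when the requested type actually differs. A rough sketch of that state handling, with trivial stand-ins for the vector allocation helpers; struct test_dev, set_irq_type() and friends are illustrative, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

enum irq_type { IRQ_TYPE_UNDEFINED = -1, IRQ_TYPE_LEGACY, IRQ_TYPE_MSI, IRQ_TYPE_MSIX };

/* Per-device state, replacing the old driver-global irq_type. */
struct test_dev {
        enum irq_type irq_type;
        int num_irqs;
};

/* Stand-ins for pci_alloc_irq_vectors()/pci_free_irq_vectors(). */
static bool alloc_vectors(struct test_dev *dev, enum irq_type type)
{
        dev->irq_type = type;
        dev->num_irqs = (type == IRQ_TYPE_LEGACY) ? 1 : 32;
        return true;
}

static void free_vectors(struct test_dev *dev)
{
        dev->num_irqs = 0;
        dev->irq_type = IRQ_TYPE_UNDEFINED;     /* reset on release, as in the fix */
}

/* Switch the device to the requested interrupt type only if it differs. */
static bool set_irq_type(struct test_dev *dev, enum irq_type want)
{
        if (dev->irq_type == want)
                return true;

        free_vectors(dev);
        if (!alloc_vectors(dev, want)) {
                free_vectors(dev);
                return false;
        }
        return true;
}

int main(void)
{
        struct test_dev dev = { .irq_type = IRQ_TYPE_UNDEFINED };

        set_irq_type(&dev, IRQ_TYPE_MSI);
        printf("type=%d irqs=%d\n", dev.irq_type, dev.num_irqs);
        return 0;
}

Tracking the type per device rather than in a module-wide variable is what keeps two test devices from interfering with each other's IRQ configuration.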
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile index abba078f7f4903eac35671aa3db54823362e86fb..eb8c76901f2b3e92a2d792439104ce6a6733f1a6 100644 --- a/drivers/mmc/core/Makefile +++ b/drivers/mmc/core/Makefile @@ -18,3 +18,4 @@ obj-$(CONFIG_MMC_BLOCK) += mmc_block.o mmc_block-objs := block.o queue.o obj-$(CONFIG_MMC_TEST) += mmc_test.o obj-$(CONFIG_SDIO_UART) += sdio_uart.o +mmc_core-$(CONFIG_MMC_CRYPTO) += crypto.o diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 60eac66dc9f0b3920cad51af7a770cf9cb5dd2bf..9b1d396ae8bf29cee27bcae265909d4d5744014f 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -51,6 +51,7 @@ #include "block.h" #include "core.h" #include "card.h" +#include "crypto.h" #include "host.h" #include "bus.h" #include "mmc_ops.h" @@ -1304,6 +1305,8 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, memset(brq, 0, sizeof(struct mmc_blk_request)); + mmc_crypto_prepare_req(mqrq); + brq->mrq.data = &brq->data; brq->mrq.tag = req->tag; diff --git a/drivers/mmc/core/crypto.c b/drivers/mmc/core/crypto.c new file mode 100644 index 0000000000000000000000000000000000000000..661e7f862ffce49a3a25fb0547294fb78b86db99 --- /dev/null +++ b/drivers/mmc/core/crypto.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2020 Google LLC + */ + +#include +#include +#include +#include + +#include "core.h" +#include "queue.h" + +void mmc_crypto_setup_queue(struct mmc_host *host, struct request_queue *q) +{ + if (host->caps2 & MMC_CAP2_CRYPTO) + q->ksm = host->ksm; +} +EXPORT_SYMBOL_GPL(mmc_crypto_setup_queue); + +void mmc_crypto_free_host(struct mmc_host *host) +{ + keyslot_manager_destroy(host->ksm); +} + +void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq) +{ + struct request *req = mmc_queue_req_to_req(mqrq); + struct mmc_request *mrq = &mqrq->brq.mrq; + const struct bio_crypt_ctx *bc; + + if (!bio_crypt_should_process(req)) + return; + + bc = req->bio->bi_crypt_context; + mrq->crypto_key_slot = bc->bc_keyslot; + mrq->data_unit_num = bc->bc_dun[0]; + mrq->crypto_key = bc->bc_key; +} +EXPORT_SYMBOL_GPL(mmc_crypto_prepare_req); diff --git a/drivers/mmc/core/crypto.h b/drivers/mmc/core/crypto.h new file mode 100644 index 0000000000000000000000000000000000000000..74145c36241b6936186f6582795af2c165b27b24 --- /dev/null +++ b/drivers/mmc/core/crypto.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2020 Google LLC + */ + +#ifndef _MMC_CORE_CRYPTO_H +#define _MMC_CORE_CRYPTO_H + +struct mmc_host; +struct mmc_queue_req; +struct request; +struct request_queue; + +#ifdef CONFIG_MMC_CRYPTO + +void mmc_crypto_setup_queue(struct mmc_host *host, struct request_queue *q); + +void mmc_crypto_free_host(struct mmc_host *host); + +void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq); + +#else /* CONFIG_MMC_CRYPTO */ + +static inline void mmc_crypto_setup_queue(struct mmc_host *host, + struct request_queue *q) { } + +static inline void mmc_crypto_free_host(struct mmc_host *host) { } + +static inline void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq) { } + +#endif /* CONFIG_MMC_CRYPTO */ + +#endif /* _MMC_CORE_CRYPTO_H */ diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 8a79f0af09bb9a5d6390d97e0a2a44f4aefb4837..171332cbbe09bf162fbce9db430214450ce3474c 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -27,6 +27,7 @@ #include #include "core.h" +#include "crypto.h" #include "host.h" #include "slot-gpio.h" #include "pwrseq.h" @@ -480,6 
+481,7 @@ EXPORT_SYMBOL(mmc_remove_host); */ void mmc_free_host(struct mmc_host *host) { + mmc_crypto_free_host(host); mmc_pwrseq_free(host); put_device(&host->class_dev); } diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index becc6594a8a47cf05dc5bbb7582d27324091b7b6..cee297c944e04c353c5ab9e848038df50caf20dc 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -21,6 +21,7 @@ #include "queue.h" #include "block.h" #include "core.h" +#include "crypto.h" #include "card.h" #include "host.h" @@ -448,6 +449,8 @@ static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card, mmc_setup_queue(mq, card); + mmc_crypto_setup_queue(host, mq->queue); + return 0; } diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c index 28f5aaca505acb668468b7ac34cd812e90219f36..2c5a6e7aadc01123f5815cfbe80030b3edf2fa57 100644 --- a/drivers/mmc/host/cqhci.c +++ b/drivers/mmc/host/cqhci.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -351,12 +352,16 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card) /* CQHCI is idle and should halt immediately, so set a small timeout */ #define CQHCI_OFF_TIMEOUT 100 +static u32 cqhci_read_ctl(struct cqhci_host *cq_host) +{ + return cqhci_readl(cq_host, CQHCI_CTL); +} + static void cqhci_off(struct mmc_host *mmc) { struct cqhci_host *cq_host = mmc->cqe_private; - ktime_t timeout; - bool timed_out; u32 reg; + int err; if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt) return; @@ -366,15 +371,9 @@ static void cqhci_off(struct mmc_host *mmc) cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL); - timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT); - while (1) { - timed_out = ktime_compare(ktime_get(), timeout) > 0; - reg = cqhci_readl(cq_host, CQHCI_CTL); - if ((reg & CQHCI_HALT) || timed_out) - break; - } - - if (timed_out) + err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg, + reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT); + if (err < 0) pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc)); else pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc)); diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index f6c76be2be0d3fa0d5b93bdd1d79acee793f7a9d..1c062473b1c2305ff30472c2283389196b56bf75 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -360,14 +360,6 @@ static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) meson_mx_mmc_start_cmd(mmc, mrq->cmd); } -static int meson_mx_mmc_card_busy(struct mmc_host *mmc) -{ - struct meson_mx_mmc_host *host = mmc_priv(mmc); - u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC); - - return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK); -} - static void meson_mx_mmc_read_response(struct mmc_host *mmc, struct mmc_command *cmd) { @@ -509,7 +501,6 @@ static void meson_mx_mmc_timeout(struct timer_list *t) static struct mmc_host_ops meson_mx_mmc_ops = { .request = meson_mx_mmc_request, .set_ios = meson_mx_mmc_set_ios, - .card_busy = meson_mx_mmc_card_busy, .get_cd = mmc_gpio_get_cd, .get_ro = mmc_gpio_get_ro, }; @@ -573,7 +564,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host) mmc->f_max = clk_round_rate(host->cfg_div_clk, clk_get_rate(host->parent_clk)); - mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; + mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY; mmc->ops = &meson_mx_mmc_ops; ret = mmc_of_parse(mmc); diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 
17b2054d9b62f94fcda818250310264ba93182fa..19ae527ecc724d801d54385f83b73f6f19fa1aaf 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1909,6 +1909,8 @@ static int sdhci_msm_probe(struct platform_device *pdev) goto clk_disable; } + msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY; + pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 65985dc3e1a74a588e4ab758e080a5dde6e84cd2..35168b47afe6cf645e84f00a07b747ee575f91db 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -556,6 +556,9 @@ static int intel_select_drive_strength(struct mmc_card *card, struct sdhci_pci_slot *slot = sdhci_priv(host); struct intel_host *intel_host = sdhci_pci_priv(slot); + if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv)) + return 0; + return intel_host->drv_strength; } diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index a0b5089b3274821b6e84f16e57ffebd08a184cae..fafb02644efde7726ff663d15834477e0b5e4a28 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -238,6 +238,16 @@ static void xenon_voltage_switch(struct sdhci_host *host) { /* Wait for 5ms after set 1.8V signal enable bit */ usleep_range(5000, 5500); + + /* + * For some reason the controller's Host Control2 register reports + * the bit representing 1.8V signaling as 0 when read after it was + * written as 1. Subsequent read reports 1. + * + * Since this may cause some issues, do an empty read of the Host + * Control2 register here to circumvent this. + */ + sdhci_readw(host, SDHCI_HOST_CONTROL2); } static const struct sdhci_ops sdhci_xenon_ops = { diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index ba44ea6d497ee6c88f1258504a1185db51082e57..1dbc9554a0786851647cacacb08bd642337150ec 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -1882,7 +1882,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, continue; } - if (time_after(jiffies, timeo) && !chip_ready(map, adr)) + /* + * We check "time_after" and "!chip_good" before checking "chip_good" to avoid + * the failure due to scheduling. 
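Both the cqhci_off() rework above (readx_poll_timeout() instead of an open-coded ktime loop) and the CFI write-buffer fix here follow the same rule: after the deadline passes, test the completion condition once more before declaring a timeout, so a thread that was scheduled out at the wrong moment does not report a spurious failure. A userspace sketch of that poll-then-recheck shape, assuming a monotonic-clock deadline and a caller-supplied register accessor; poll_until() and read_halt_reg() are illustrative names:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/*
 * Poll read_reg() until (value & mask) is set or timeout_us elapses.  The
 * condition is tested one final time after the deadline, so a late state
 * change is not reported as a timeout.
 */
static int poll_until(uint32_t (*read_reg)(void), uint32_t mask,
                      uint32_t *val, uint64_t timeout_us)
{
        uint64_t deadline = now_us() + timeout_us;

        for (;;) {
                *val = read_reg();
                if (*val & mask)
                        return 0;
                if (now_us() > deadline)
                        break;
        }
        *val = read_reg();
        return (*val & mask) ? 0 : -ETIMEDOUT;
}

/* Fake register that reports the halt bit immediately. */
static uint32_t read_halt_reg(void)
{
        return 0x1;
}

int main(void)
{
        uint32_t val;
        int err = poll_until(read_halt_reg, 0x1, &val, 100);

        printf("poll result: %d, val=0x%x\n", err, val);
        return err ? 1 : 0;
}

readx_poll_timeout(cqhci_read_ctl, cq_host, reg, reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT) expands to essentially this loop, with the sleep step set to zero for a pure busy-wait.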
+ */ + if (time_after(jiffies, timeo) && !chip_good(map, adr, datum)) break; if (chip_good(map, adr, datum)) { diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 9ee04b5f931139cbb6d119c6901c39a4a39cda0f..5a04ff63868817ddf2b2910db80683b608d8fa74 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c @@ -240,22 +240,25 @@ static int phram_setup(const char *val) ret = parse_num64(&start, token[1]); if (ret) { - kfree(name); parse_err("illegal start address\n"); + goto error; } ret = parse_num64(&len, token[2]); if (ret) { - kfree(name); parse_err("illegal device length\n"); + goto error; } ret = register_device(name, start, len); - if (!ret) - pr_info("%s device: %#llx at %#llx\n", name, len, start); - else - kfree(name); + if (ret) + goto error; + + pr_info("%s device: %#llx at %#llx\n", name, len, start); + return 0; +error: + kfree(name); return ret; } diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c index b13557fe52bd0b3d4589eeb7ae7c890bf69eca77..947bb710bf16d1f50d8e2d5917569268aaad7f7b 100644 --- a/drivers/mtd/lpddr/lpddr_cmds.c +++ b/drivers/mtd/lpddr/lpddr_cmds.c @@ -81,7 +81,6 @@ struct mtd_info *lpddr_cmdset(struct map_info *map) shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared), GFP_KERNEL); if (!shared) { - kfree(lpddr); kfree(mtd); return NULL; } diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 48b3ab26b12492d8b2efbc41e1d8eb4a5eda72d6..a2f38b3b97763046821703e6f2467378c2629c60 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -629,18 +629,18 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to, static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos) { struct spinand_device *spinand = nand_to_spinand(nand); + u8 marker[2] = { }; struct nand_page_io_req req = { .pos = *pos, - .ooblen = 2, + .ooblen = sizeof(marker), .ooboffs = 0, - .oobbuf.in = spinand->oobbuf, + .oobbuf.in = marker, .mode = MTD_OPS_RAW, }; - memset(spinand->oobbuf, 0, 2); spinand_select_target(spinand, pos->target); spinand_read_page(spinand, &req, false); - if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff) + if (marker[0] != 0xff || marker[1] != 0xff) return true; return false; @@ -664,15 +664,16 @@ static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs) static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos) { struct spinand_device *spinand = nand_to_spinand(nand); + u8 marker[2] = { }; struct nand_page_io_req req = { .pos = *pos, .ooboffs = 0, - .ooblen = 2, - .oobbuf.out = spinand->oobbuf, + .ooblen = sizeof(marker), + .oobbuf.out = marker, + .mode = MTD_OPS_RAW, }; int ret; - /* Erase block before marking it bad. 
*/ ret = spinand_select_target(spinand, pos->target); if (ret) return ret; @@ -681,9 +682,6 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos) if (ret) return ret; - spinand_erase_op(spinand, pos); - - memset(spinand->oobbuf, 0, 2); return spinand_write_page(spinand, &req); } diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index db96078096206996cef1b18cc461970e83af0c4c..f99cd94509be30125e48bfc4bd30830289f42b18 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -147,7 +147,7 @@ static void slc_bump(struct slcan *sl) u32 tmpid; char *cmd = sl->rbuff; - cf.can_id = 0; + memset(&cf, 0, sizeof(cf)); switch (*cmd) { case 'r': @@ -186,8 +186,6 @@ static void slc_bump(struct slcan *sl) else return; - *(u64 *) (&cf.data) = 0; /* clear payload */ - /* RTR frames may have a dlc > 0 but they never have any data bytes */ if (!(cf.can_id & CAN_RTR_FLAG)) { for (i = 0; i < cf.can_dlc; i++) { diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index ac5d945b934a04f01ce8148193150a01c783155b..11f3993ab7f307ed266bb4febb43095871b3c76f 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1253,6 +1253,10 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) reg |= ARLTBL_RW; else reg &= ~ARLTBL_RW; + if (dev->vlan_enabled) + reg &= ~ARLTBL_IVL_SVL_SELECT; + else + reg |= ARLTBL_IVL_SVL_SELECT; b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg); return b53_arl_op_wait(dev); @@ -1262,6 +1266,7 @@ static int b53_arl_read(struct b53_device *dev, u64 mac, u16 vid, struct b53_arl_entry *ent, u8 *idx, bool is_valid) { + DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); unsigned int i; int ret; @@ -1269,6 +1274,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac, if (ret) return ret; + bitmap_zero(free_bins, dev->num_arl_entries); + /* Read the bins */ for (i = 0; i < dev->num_arl_entries; i++) { u64 mac_vid; @@ -1280,13 +1287,24 @@ static int b53_arl_read(struct b53_device *dev, u64 mac, B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); b53_arl_to_entry(ent, mac_vid, fwd_entry); - if (!(fwd_entry & ARLTBL_VALID)) + if (!(fwd_entry & ARLTBL_VALID)) { + set_bit(i, free_bins); continue; + } if ((mac_vid & ARLTBL_MAC_MASK) != mac) continue; + if (dev->vlan_enabled && + ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid) + continue; *idx = i; + return 0; } + if (bitmap_weight(free_bins, dev->num_arl_entries) == 0) + return -ENOSPC; + + *idx = find_first_bit(free_bins, dev->num_arl_entries); + return -ENOENT; } @@ -1316,10 +1334,21 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, if (op) return ret; - /* We could not find a matching MAC, so reset to a new entry */ - if (ret) { + switch (ret) { + case -ENOSPC: + dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", + addr, vid); + return is_valid ? 
ret : 0; + case -ENOENT: + /* We could not find a matching MAC, so reset to a new entry */ + dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n", + addr, vid, idx); fwd_entry = 0; - idx = 1; + break; + default: + dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n", + addr, vid, idx); + break; } memset(&ent, 0, sizeof(ent)); diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index 2a9f421680aad625d8e12020ce853808685eeb84..c90985c294a2e72888229963b89174e01bb6a978 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -292,6 +292,7 @@ /* ARL Table Read/Write Register (8 bit) */ #define B53_ARLTBL_RW_CTRL 0x00 #define ARLTBL_RW BIT(0) +#define ARLTBL_IVL_SVL_SELECT BIT(6) #define ARLTBL_START_DONE BIT(7) /* MAC Address Index Register (48 bit) */ @@ -304,7 +305,7 @@ * * BCM5325 and BCM5365 share most definitions below */ -#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n)) +#define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10) #define ARLTBL_MAC_MASK 0xffffffffffffULL #define ARLTBL_VID_S 48 #define ARLTBL_VID_MASK_25 0xff @@ -316,13 +317,16 @@ #define ARLTBL_VALID_25 BIT(63) /* ARL Table Data Entry N Registers (32 bit) */ -#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x08) +#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18) #define ARLTBL_DATA_PORT_ID_MASK 0x1ff #define ARLTBL_TC(tc) ((3 & tc) << 11) #define ARLTBL_AGE BIT(14) #define ARLTBL_STATIC BIT(15) #define ARLTBL_VALID BIT(16) +/* Maximum number of bin entries in the ARL for all switches */ +#define B53_ARLTBL_MAX_BIN_ENTRIES 4 + /* ARL Search Control Register (8 bit) */ #define B53_ARL_SRCH_CTL 0x50 #define B53_ARL_SRCH_CTL_25 0x20 diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 8c69789fbe094ed0ae9d89415bbc00917057adba..ccba648452c42dc8d491001603f03bfbf645fdf5 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -461,7 +461,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) priv->slave_mii_bus->parent = ds->dev->parent; priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask; - err = of_mdiobus_register(priv->slave_mii_bus, dn); + err = mdiobus_register(priv->slave_mii_bus); if (err && dn) of_node_put(dn); @@ -1014,6 +1014,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) const struct bcm_sf2_of_data *data; struct b53_platform_data *pdata; struct dsa_switch_ops *ops; + struct device_node *ports; struct bcm_sf2_priv *priv; struct b53_device *dev; struct dsa_switch *ds; @@ -1077,7 +1078,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) set_bit(0, priv->cfp.used); set_bit(0, priv->cfp.unique); - bcm_sf2_identify_ports(priv, dn->child); + ports = of_find_node_by_name(dn, "ports"); + if (ports) { + bcm_sf2_identify_ports(priv, ports); + of_node_put(ports); + } priv->irq0 = irq_of_parse_and_map(dn, 0); priv->irq1 = irq_of_parse_and_map(dn, 1); diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index 21db1804e85d9370d0331e3dc7091536b705312a..12156ab186a10f00874317524c0bc6f931f79884 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -742,17 +742,14 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, fs->m_ext.data[1])) return -EINVAL; - if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES) + if (fs->location != RX_CLS_LOC_ANY && + fs->location > bcm_sf2_cfp_rule_size(priv)) return -EINVAL; if (fs->location != RX_CLS_LOC_ANY && test_bit(fs->location, priv->cfp.used)) return -EBUSY; - if (fs->location != 
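The reworked b53_arl_read() scans every bin of the hashed bucket, remembers which ones are free, and distinguishes three outcomes: an existing entry (index returned), no match but a free bin available (-ENOENT plus the first free index so the caller can create the entry there), or a full bucket (-ENOSPC). A reduced sketch of that lookup, using a plain flag per bin instead of the DECLARE_BITMAP()/find_first_bit() helpers and always matching on VID; struct arl_bin and arl_find() are illustrative names:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_BINS 4      /* B53_ARLTBL_MAX_BIN_ENTRIES */

struct arl_bin {
        bool valid;
        uint64_t mac;
        uint16_t vid;
};

/*
 * Look up {mac, vid} among the bins of one ARL bucket.  On a hit the bin
 * index is returned through *idx.  On a miss, *idx is set to the first
 * free bin (-ENOENT) so the caller can create the entry there, or
 * -ENOSPC is returned when every bin holds some other entry.
 */
static int arl_find(const struct arl_bin *bins, uint64_t mac, uint16_t vid,
                    unsigned int *idx)
{
        int first_free = -1;
        unsigned int i;

        for (i = 0; i < NUM_BINS; i++) {
                if (!bins[i].valid) {
                        if (first_free < 0)
                                first_free = i;
                        continue;
                }
                if (bins[i].mac == mac && bins[i].vid == vid) {
                        *idx = i;
                        return 0;
                }
        }

        if (first_free < 0)
                return -ENOSPC;

        *idx = first_free;
        return -ENOENT;
}

int main(void)
{
        struct arl_bin bins[NUM_BINS] = {
                { true, 0x001122334455ULL, 1 },
                { false, 0, 0 },
                { true, 0x0011223344ffULL, 1 },
                { false, 0, 0 },
        };
        unsigned int idx = 0;
        int ret = arl_find(bins, 0xaabbccddeeffULL, 1, &idx);

        /* Miss with space available: -ENOENT and idx of the first free bin. */
        printf("ret=%d idx=%u\n", ret, idx);
        return 0;
}

b53_arl_op() then maps -ENOENT to "initialise a fresh entry at idx" and -ENOSPC to a debug message, rather than unconditionally reusing bin 1 as the old code did.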
RX_CLS_LOC_ANY && - fs->location > bcm_sf2_cfp_rule_size(priv)) - return -EINVAL; - /* This rule is a Wake-on-LAN filter and we must specifically * target the CPU port in order for it to be working. */ @@ -839,7 +836,7 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 next_loc = 0; int ret; - if (loc >= CFP_NUM_RULES) + if (loc > bcm_sf2_cfp_rule_size(priv)) return -EINVAL; /* Refuse deleting unused rules, and those that are not unique since diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 8aa3b0af9fc2bd992d32da1a5da47d8d25e0966e..05982e9fb6bbc448ad57f3c49b89f338ee8143b3 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -824,8 +824,9 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port) */ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, MT7530_PORT_MATRIX_MODE); - mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK, - VLAN_ATTR(MT7530_VLAN_TRANSPARENT)); + mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK, + VLAN_ATTR(MT7530_VLAN_TRANSPARENT) | + PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); priv->ports[port].vlan_filtering = false; @@ -843,8 +844,8 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port) if (all_user_ports_removed) { mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT), PCR_MATRIX(dsa_user_ports(priv->ds))); - mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT), - PORT_SPEC_TAG); + mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT), PORT_SPEC_TAG + | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); } } @@ -870,8 +871,9 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port) /* Set the port as a user port which is to be able to recognize VID * from incoming packets before fetching entry within the VLAN table. */ - mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK, - VLAN_ATTR(MT7530_VLAN_USER)); + mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK, + VLAN_ATTR(MT7530_VLAN_USER) | + PVC_EG_TAG(MT7530_VLAN_EG_DISABLED)); } static void @@ -1297,6 +1299,10 @@ mt7530_setup(struct dsa_switch *ds) mt7530_cpu_port_enable(priv, i); else mt7530_port_disable(ds, i, NULL); + + /* Enable consistent egress tag */ + mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, + PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); } /* Flush the FDB table */ diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index d9b407a22a584720c7037dc91016c23b078200ab..ea30f10397aad237cb91ec1404a5360c2900404a 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -167,9 +167,16 @@ enum mt7530_port_mode { /* Register for port vlan control */ #define MT7530_PVC_P(x) (0x2010 + ((x) * 0x100)) #define PORT_SPEC_TAG BIT(5) +#define PVC_EG_TAG(x) (((x) & 0x7) << 8) +#define PVC_EG_TAG_MASK PVC_EG_TAG(7) #define VLAN_ATTR(x) (((x) & 0x3) << 6) #define VLAN_ATTR_MASK VLAN_ATTR(3) +enum mt7530_vlan_port_eg_tag { + MT7530_VLAN_EG_DISABLED = 0, + MT7530_VLAN_EG_CONSISTENT = 1, +}; + enum mt7530_vlan_port_attr { MT7530_VLAN_USER = 0, MT7530_VLAN_TRANSPARENT = 3, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index d96a84a62d78dff9625ce78d15779a05df8b510c..5519eff5844175bab7732478c5f7fbf9028788b5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -515,7 +515,7 @@ static void xgbe_isr_task(unsigned long data) xgbe_disable_rx_tx_ints(pdata); /* Turn on polling */ - __napi_schedule_irqoff(&pdata->napi); + __napi_schedule(&pdata->napi); } } else { /* Don't clear Rx/Tx status if doing per 
channel DMA diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 3fdf135bad56e4722b55b2fd4b4d1b20bbc5c69d..6b761f6b8fd562e023d3d0229590d477968bdec0 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -677,7 +677,8 @@ static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, dma_addr_t mapping; /* Allocate a new SKB for a new packet */ - skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); + skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH, + GFP_ATOMIC | __GFP_NOWARN); if (!skb) { priv->mib.alloc_rx_buff_failed++; netif_err(priv, rx_err, ndev, "SKB alloc failed\n"); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f008c91d4566bb93b229fbdbb17978ad81311125..dca58d28d82ffcdf4d885ae9e83c4c4f5ddd5ddb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7562,6 +7562,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, netdev_features_t features) { struct bnxt *bp = netdev_priv(dev); + netdev_features_t vlan_features; if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) features &= ~NETIF_F_NTUPLE; @@ -7578,12 +7579,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, /* Both CTAG and STAG VLAN accelaration on the RX side have to be * turned on or off together. */ - if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != - (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { + vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX); + if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX)) { if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) features &= ~(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX); - else + else if (vlan_features) features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; } @@ -9297,8 +9300,11 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) } } - if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) - dev_close(netdev); + if (result != PCI_ERS_RESULT_RECOVERED) { + if (netif_running(netdev)) + dev_close(netdev); + pci_disable_device(pdev); + } rtnl_unlock(); @@ -9309,7 +9315,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) err); /* non-fatal, continue */ } - return PCI_ERS_RESULT_RECOVERED; + return result; } /** diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 585f5aef0a45bb0b748876d5db2fbdf7ed4663aa..f3f5484c43e4a5434ca8b3b57f1193a9599c74b7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -839,7 +839,6 @@ struct bnxt_vf_info { #define BNXT_VF_LINK_FORCED 0x4 #define BNXT_VF_LINK_UP 0x8 #define BNXT_VF_TRUST 0x10 - u32 func_flags; /* func cfg flags */ u32 min_tx_rate; u32 max_tx_rate; void *hwrm_cmd_req_addr; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 3962f6fd543c96f6322ca531c9ebdb3eb1cd2a23..bba6f09279d5e7d6de073c60adfcb8e72f83bcfe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -99,11 +99,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) if (old_setting == setting) return 0; - func_flags = vf->func_flags; if (setting) - func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; + func_flags = 
FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; else - func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; + func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; /*TODO: if the driver supports VLAN filter on guest VLAN, * the spoof check should also include vlan anti-spoofing */ @@ -112,7 +111,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) req.flags = cpu_to_le32(func_flags); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { - vf->func_flags = func_flags; if (setting) vf->flags |= BNXT_VF_SPOOFCHK; else @@ -197,7 +195,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) memcpy(vf->mac_addr, mac, ETH_ALEN); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(vf->func_flags); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); memcpy(req.dflt_mac_addr, mac, ETH_ALEN); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -235,7 +232,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(vf->func_flags); req.dflt_vlan = cpu_to_le16(vlan_tag); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -274,7 +270,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(vf->func_flags); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); req.max_bw = cpu_to_le32(max_tx_rate); req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 736a6a5fbd98db31681bfab9d966cb6b908cf886..89cc146d2c5c8a93f656118c1ee08205ef00e4cf 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -998,6 +998,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev, if (netif_running(dev)) bcmgenet_update_mib_counters(priv); + dev->netdev_ops->ndo_get_stats(dev); + for (i = 0; i < BCMGENET_STATS_LEN; i++) { const struct bcmgenet_stats *s; char *p; @@ -1697,7 +1699,8 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, dma_addr_t mapping; /* Allocate a new Rx skb */ - skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); + skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT, + GFP_ATOMIC | __GFP_NOWARN); if (!skb) { priv->mib.alloc_rx_buff_failed++; netif_err(priv, rx_err, priv->dev, @@ -3211,6 +3214,7 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) dev->stats.rx_packets = rx_packets; dev->stats.rx_errors = rx_errors; dev->stats.rx_missed_errors = rx_errors; + dev->stats.rx_dropped = rx_dropped; return &dev->stats; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index b766362031c32e4be277d13c8f6b0c3397eab97d..5bc58429bb1c4ceb7b5d772ce439f3adbd1de258 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -1065,9 +1065,9 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init, } } -static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init, - struct cudbg_error *cudbg_err, - u8 mem_type) 
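The chelsio cudbg hunk here converts cudbg_mem_region_size() from returning the size in an unsigned long (which cannot carry a negative error code) to returning an int status and handing the size back through an output pointer. A minimal standalone sketch of that status-plus-out-parameter pattern, with hypothetical names rather than the driver's code:

#include <errno.h>
#include <stdio.h>

/* Old style: the return value is both "size" and "error", which cannot
 * represent negative errno values in an unsigned type.
 * New style: return 0 or -errno, and report the size via *region_size.
 */
static int mem_region_size(unsigned int mem_type, unsigned long *region_size)
{
        if (mem_type > 3)               /* hypothetical validity check */
                return -EINVAL;

        if (region_size)                /* fill the out-parameter only on success */
                *region_size = 4096UL << mem_type;

        return 0;
}

int main(void)
{
        unsigned long size = 0;
        int rc = mem_region_size(2, &size);

        if (rc)
                fprintf(stderr, "lookup failed: %d\n", rc);
        else
                printf("region size: %lu\n", size);
        return 0;
}

The caller treats any non-zero return as failure and only trusts *region_size when 0 is returned, which is exactly how cudbg_collect_mem_region() now consumes the helper.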
+static int cudbg_mem_region_size(struct cudbg_init *pdbg_init, + struct cudbg_error *cudbg_err, + u8 mem_type, unsigned long *region_size) { struct adapter *padap = pdbg_init->adap; struct cudbg_meminfo mem_info; @@ -1076,15 +1076,23 @@ static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init, memset(&mem_info, 0, sizeof(struct cudbg_meminfo)); rc = cudbg_fill_meminfo(padap, &mem_info); - if (rc) + if (rc) { + cudbg_err->sys_err = rc; return rc; + } cudbg_t4_fwcache(pdbg_init, cudbg_err); rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx); - if (rc) + if (rc) { + cudbg_err->sys_err = rc; return rc; + } + + if (region_size) + *region_size = mem_info.avail[mc_idx].limit - + mem_info.avail[mc_idx].base; - return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base; + return 0; } static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init, @@ -1092,7 +1100,12 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init, struct cudbg_error *cudbg_err, u8 mem_type) { - unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type); + unsigned long size = 0; + int rc; + + rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size); + if (rc) + return rc; return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size, cudbg_err); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c index 9f9d6cae39d555057ba63ac6ee874d0884c0c51d..ff7e58a8c90fb59c14472ecaf85ecae5c9febd36 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c @@ -246,6 +246,9 @@ static int cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta) FW_PTP_CMD_PORTID_V(0)); c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); c.u.ts.sc = FW_PTP_SC_ADJ_FTIME; + c.u.ts.sign = (delta < 0) ? 
1 : 0; + if (delta < 0) + delta = -delta; c.u.ts.tm = cpu_to_be64(delta); err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); @@ -308,32 +311,17 @@ static int cxgb4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) */ static int cxgb4_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { - struct adapter *adapter = (struct adapter *)container_of(ptp, - struct adapter, ptp_clock_info); - struct fw_ptp_cmd c; + struct adapter *adapter = container_of(ptp, struct adapter, + ptp_clock_info); u64 ns; - int err; - memset(&c, 0, sizeof(c)); - c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | - FW_CMD_REQUEST_F | - FW_CMD_READ_F | - FW_PTP_CMD_PORTID_V(0)); - c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); - c.u.ts.sc = FW_PTP_SC_GET_TIME; - - err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), &c); - if (err < 0) { - dev_err(adapter->pdev_dev, - "PTP: %s error %d\n", __func__, -err); - return err; - } + ns = t4_read_reg(adapter, T5_PORT_REG(0, MAC_PORT_PTP_SUM_LO_A)); + ns |= (u64)t4_read_reg(adapter, + T5_PORT_REG(0, MAC_PORT_PTP_SUM_HI_A)) << 32; /* convert to timespec*/ - ns = be64_to_cpu(c.u.ts.tm); *ts = ns_to_timespec64(ns); - - return err; + return 0; } /** diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 8350c0c9b89d1878ca6ff81218a70841ce9f37d6..5934ec9b6a31f30d9f190a5e0a12833309740b4b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3748,7 +3748,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver) FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION)); ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); - if (ret < 0) + if (ret) return ret; *phy_fw_ver = val; return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index eb222d40ddbf32807b17c45cd42f5271d21ba12e..a64eb6ac5c7681fe7969fcc661b4520466dfdcd3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1896,6 +1896,9 @@ #define MAC_PORT_CFG2_A 0x818 +#define MAC_PORT_PTP_SUM_LO_A 0x990 +#define MAC_PORT_PTP_SUM_HI_A 0x994 + #define MPS_CMN_CTL_A 0x9000 #define COUNTPAUSEMCRX_S 5 diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index bf80855dd0dd4337e7a9c577744d4202fb00c4d3..d06a89e99872d80f160c2e6fc2deadc9247c6535 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -488,6 +488,12 @@ struct fec_enet_priv_rx_q { struct sk_buff *rx_skbuff[RX_RING_SIZE]; }; +struct fec_stop_mode_gpr { + struct regmap *gpr; + u8 reg; + u8 bit; +}; + /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and * tx_bd_base always point to the base of the buffer descriptors. The * cur_rx and cur_tx point to the currently available buffer. 
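The struct fec_stop_mode_gpr added just above bundles a syscon regmap with a register offset and bit index; the fec_enet_stop_mode() helper introduced further down drives that single bit through regmap_update_bits(). A simplified, self-contained model of the masked read-modify-write that helper relies on (a userspace sketch, not the kernel regmap API; the 0x34/bit-27 values mirror the i.MX6Q entry, but the register array is invented):

#include <stdio.h>

#define BIT(n)  (1u << (n))

/* Pretend "register file": index is the register offset / 4. */
static unsigned int regs[16];

/* Simplified model of a masked update: only the bits in @mask change,
 * and they take the corresponding bits from @val.
 */
static void gpr_update_bits(unsigned int reg, unsigned int mask, unsigned int val)
{
        regs[reg] = (regs[reg] & ~mask) | (val & mask);
}

static void stop_mode_set(unsigned int reg, unsigned int bit, int enabled)
{
        gpr_update_bits(reg, BIT(bit), enabled ? BIT(bit) : 0);
}

int main(void)
{
        stop_mode_set(0x34 / 4, 27, 1); /* assert the stop request bit */
        printf("reg 0x34 = 0x%08x\n", regs[0x34 / 4]);
        stop_mode_set(0x34 / 4, 27, 0); /* release it on resume */
        printf("reg 0x34 = 0x%08x\n", regs[0x34 / 4]);
        return 0;
}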
@@ -563,6 +569,7 @@ struct fec_enet_private { int hwts_tx_en; struct delayed_work time_keep; struct regulator *reg_phy; + struct fec_stop_mode_gpr stop_gpr; unsigned int tx_align; unsigned int rx_align; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9142992ccd5a797f22539962d6b6650973c74b72..48c58f93b124bb10fbbbfc98e05e65ad0d89bf75 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -62,6 +62,8 @@ #include #include #include +#include +#include #include #include @@ -84,6 +86,56 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); #define FEC_ENET_OPD_V 0xFFF0 #define FEC_MDIO_PM_TIMEOUT 100 /* ms */ +struct fec_devinfo { + u32 quirks; + u8 stop_gpr_reg; + u8 stop_gpr_bit; +}; + +static const struct fec_devinfo fec_imx25_info = { + .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR | + FEC_QUIRK_HAS_FRREG, +}; + +static const struct fec_devinfo fec_imx27_info = { + .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG, +}; + +static const struct fec_devinfo fec_imx28_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | + FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC | + FEC_QUIRK_HAS_FRREG, +}; + +static const struct fec_devinfo fec_imx6q_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | + FEC_QUIRK_HAS_RACC, + .stop_gpr_reg = 0x34, + .stop_gpr_bit = 27, +}; + +static const struct fec_devinfo fec_mvf600_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC, +}; + +static const struct fec_devinfo fec_imx6x_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, +}; + +static const struct fec_devinfo fec_imx6ul_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 | + FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC | + FEC_QUIRK_HAS_COALESCE, +}; + static struct platform_device_id fec_devtype[] = { { /* keep it for coldfire */ @@ -91,39 +143,25 @@ static struct platform_device_id fec_devtype[] = { .driver_data = 0, }, { .name = "imx25-fec", - .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR | - FEC_QUIRK_HAS_FRREG, + .driver_data = (kernel_ulong_t)&fec_imx25_info, }, { .name = "imx27-fec", - .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG, + .driver_data = (kernel_ulong_t)&fec_imx27_info, }, { .name = "imx28-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | - FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC | - FEC_QUIRK_HAS_FRREG, + .driver_data = (kernel_ulong_t)&fec_imx28_info, }, { .name = "imx6q-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | - FEC_QUIRK_HAS_RACC, + .driver_data = (kernel_ulong_t)&fec_imx6q_info, }, { .name = "mvf600-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC, + .driver_data = (kernel_ulong_t)&fec_mvf600_info, }, { .name = "imx6sx-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | - FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | - FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, + 
.driver_data = (kernel_ulong_t)&fec_imx6x_info, }, { .name = "imx6ul-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 | - FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC | - FEC_QUIRK_HAS_COALESCE, + .driver_data = (kernel_ulong_t)&fec_imx6ul_info, }, { /* sentinel */ } @@ -1089,11 +1127,28 @@ fec_restart(struct net_device *ndev) } +static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) +{ + struct fec_platform_data *pdata = fep->pdev->dev.platform_data; + struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr; + + if (stop_gpr->gpr) { + if (enabled) + regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, + BIT(stop_gpr->bit), + BIT(stop_gpr->bit)); + else + regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, + BIT(stop_gpr->bit), 0); + } else if (pdata && pdata->sleep_mode_enable) { + pdata->sleep_mode_enable(enabled); + } +} + static void fec_stop(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct fec_platform_data *pdata = fep->pdev->dev.platform_data; u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); u32 val; @@ -1122,9 +1177,7 @@ fec_stop(struct net_device *ndev) val = readl(fep->hwp + FEC_ECNTRL); val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); writel(val, fep->hwp + FEC_ECNTRL); - - if (pdata && pdata->sleep_mode_enable) - pdata->sleep_mode_enable(true); + fec_enet_stop_mode(fep, true); } writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); @@ -3347,6 +3400,37 @@ static int fec_enet_get_irq_cnt(struct platform_device *pdev) return irq_cnt; } +static int fec_enet_init_stop_mode(struct fec_enet_private *fep, + struct fec_devinfo *dev_info, + struct device_node *np) +{ + struct device_node *gpr_np; + int ret = 0; + + if (!dev_info) + return 0; + + gpr_np = of_parse_phandle(np, "gpr", 0); + if (!gpr_np) + return 0; + + fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np); + if (IS_ERR(fep->stop_gpr.gpr)) { + dev_err(&fep->pdev->dev, "could not find gpr regmap\n"); + ret = PTR_ERR(fep->stop_gpr.gpr); + fep->stop_gpr.gpr = NULL; + goto out; + } + + fep->stop_gpr.reg = dev_info->stop_gpr_reg; + fep->stop_gpr.bit = dev_info->stop_gpr_bit; + +out: + of_node_put(gpr_np); + + return ret; +} + static int fec_probe(struct platform_device *pdev) { @@ -3362,6 +3446,7 @@ fec_probe(struct platform_device *pdev) int num_rx_qs; char irq_name[8]; int irq_cnt; + struct fec_devinfo *dev_info; fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); @@ -3379,7 +3464,9 @@ fec_probe(struct platform_device *pdev) of_id = of_match_device(fec_dt_ids, &pdev->dev); if (of_id) pdev->id_entry = of_id->data; - fep->quirks = pdev->id_entry->driver_data; + dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data; + if (dev_info) + fep->quirks = dev_info->quirks; fep->netdev = ndev; fep->num_rx_queues = num_rx_qs; @@ -3414,6 +3501,10 @@ fec_probe(struct platform_device *pdev) if (of_get_property(np, "fsl,magic-packet", NULL)) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; + ret = fec_enet_init_stop_mode(fep, dev_info, np); + if (ret) + goto failed_stop_mode; + phy_node = of_parse_phandle(np, "phy-handle", 0); if (!phy_node && of_phy_is_fixed_link(np)) { ret = of_phy_register_fixed_link(np); @@ -3583,6 +3674,7 @@ fec_probe(struct platform_device *pdev) if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(phy_node); +failed_stop_mode: failed_phy: dev_id--; failed_ioremap: @@ -3660,7 +3752,6 @@ static int __maybe_unused fec_resume(struct device *dev) { struct 
net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); - struct fec_platform_data *pdata = fep->pdev->dev.platform_data; int ret; int val; @@ -3678,8 +3769,8 @@ static int __maybe_unused fec_resume(struct device *dev) goto failed_clk; } if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { - if (pdata && pdata->sleep_mode_enable) - pdata->sleep_mode_enable(false); + fec_enet_stop_mode(fep, false); + val = readl(fep->hwp + FEC_ECNTRL); val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); writel(val, fep->hwp + FEC_ECNTRL); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index 4d09ea786b35fee607ecb6ea45b148458b98e374..ee715bf785adf0bf1705aa042f4ccac4a8158040 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -398,7 +398,8 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, spin_unlock_bh(&cmdq->cmdq_lock); - if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) { + if (!wait_for_completion_timeout(&done, + msecs_to_jiffies(CMDQ_TIMEOUT))) { spin_lock_bh(&cmdq->cmdq_lock); if (cmdq->errcode[curr_prod_idx] == &errcode) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 9deec13d98e937829fe1bdb17c89acb57224cbe8..4c91c8ceac5f9b59e8bbb716a09a83ceefc6c42b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -370,50 +370,6 @@ static int wait_for_db_state(struct hinic_hwdev *hwdev) return -EFAULT; } -static int wait_for_io_stopped(struct hinic_hwdev *hwdev) -{ - struct hinic_cmd_io_status cmd_io_status; - struct hinic_hwif *hwif = hwdev->hwif; - struct pci_dev *pdev = hwif->pdev; - struct hinic_pfhwdev *pfhwdev; - unsigned long end; - u16 out_size; - int err; - - if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { - dev_err(&pdev->dev, "Unsupported PCI Function type\n"); - return -EINVAL; - } - - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - - cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif); - - end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT); - do { - err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, - HINIC_COMM_CMD_IO_STATUS_GET, - &cmd_io_status, sizeof(cmd_io_status), - &cmd_io_status, &out_size, - HINIC_MGMT_MSG_SYNC); - if ((err) || (out_size != sizeof(cmd_io_status))) { - dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n", - err); - return err; - } - - if (cmd_io_status.status == IO_STOPPED) { - dev_info(&pdev->dev, "IO stopped\n"); - return 0; - } - - msleep(20); - } while (time_before(jiffies, end)); - - dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n"); - return -ETIMEDOUT; -} - /** * clear_io_resource - set the IO resources as not active in the NIC * @hwdev: the NIC HW device @@ -433,11 +389,8 @@ static int clear_io_resources(struct hinic_hwdev *hwdev) return -EINVAL; } - err = wait_for_io_stopped(hwdev); - if (err) { - dev_err(&pdev->dev, "IO has not stopped yet\n"); - return err; - } + /* sleep 100ms to wait for firmware stopping I/O */ + msleep(100); cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index 278dc13f3dae8ccda357cf866adb01ada0d11786..9fcf2e5e000395100a7977f02dfb3b66c02307eb 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -52,7 +52,7 @@ #define 
MSG_NOT_RESP 0xFFFF -#define MGMT_MSG_TIMEOUT 1000 +#define MGMT_MSG_TIMEOUT 5000 #define mgmt_to_pfhwdev(pf_mgmt) \ container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) @@ -276,7 +276,8 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, goto unlock_sync_msg; } - if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) { + if (!wait_for_completion_timeout(recv_done, + msecs_to_jiffies(MGMT_MSG_TIMEOUT))) { dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id); err = -ETIMEDOUT; goto unlock_sync_msg; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 4afe56a6eedfbedc09c5b1826692a0184369a726..f7825c7b92fe31e372345eff922dd15db3245ae6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2539,6 +2539,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev) if (!err || err == -ENOSPC) { priv->def_counter[port] = idx; + err = 0; } else if (err == -ENOENT) { err = 0; continue; @@ -2589,7 +2590,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (!err) *idx = get_param_l(&out_param); - + if (WARN_ON(err == -ENOSPC)) + err = -EINVAL; return err; } return __mlx4_counter_alloc(dev, idx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index a53736c26c0cec416b119878356e2beffe17b37e..30045668472811eb8a8682b9932ece13eac032f2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -862,7 +862,6 @@ static void cmd_work_handler(struct work_struct *work) } cmd->ent_arr[ent->idx] = ent; - set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); @@ -884,6 +883,7 @@ static void cmd_work_handler(struct work_struct *work) if (ent->callback) schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); + set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); /* Skip sending command to fw if internal error */ if (pci_channel_offline(dev->pdev) || @@ -896,6 +896,10 @@ static void cmd_work_handler(struct work_struct *work) MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + /* no doorbell, no need to keep the entry */ + free_ent(cmd, ent->idx); + if (ent->callback) + free_cmd(ent); return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index d4ec93bde4dedbaeca4bb5976c705a3ea6b83f82..2266c09b741a26b134858ea9c56c86234bf190c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -796,7 +796,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) return NULL; } - tracer = kzalloc(sizeof(*tracer), GFP_KERNEL); + tracer = kvzalloc(sizeof(*tracer), GFP_KERNEL); if (!tracer) return ERR_PTR(-ENOMEM); @@ -842,7 +842,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) tracer->dev = NULL; destroy_workqueue(tracer->work_queue); free_tracer: - kfree(tracer); + kvfree(tracer); return ERR_PTR(err); } @@ -919,7 +919,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) mlx5_fw_tracer_destroy_log_buf(tracer); flush_workqueue(tracer->work_queue); destroy_workqueue(tracer->work_queue); - kfree(tracer); + kvfree(tracer); } void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, 
struct mlx5_eqe *eqe) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index c8928ce69185f287e3c8a6134229235a5fb013a7..3050853774ee0f00050554ef3f398c2ee18ca17d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2217,12 +2217,11 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) struct mlx5e_rep_priv *uplink_rpriv; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - int ret; - ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst, - fl6); - if (ret < 0) - return ret; + dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6, + NULL); + if (IS_ERR(dst)) + return PTR_ERR(dst); if (!(*out_ttl)) *out_ttl = ip6_dst_hoplimit(dst); @@ -2428,7 +2427,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN; struct ip_tunnel_key *tun_key = &e->tun_info.key; - struct net_device *out_dev; + struct net_device *out_dev = NULL; struct neighbour *n = NULL; struct flowi6 fl6 = {}; u8 nud_state, tos, ttl; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index c51b2adfc1e19fcd739b4f7a2115d3842b9423b0..2cbfa5cfefabc8522386aa3d127a948245949cf1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -316,7 +316,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) block = kzalloc(sizeof(*block), GFP_KERNEL); if (!block) - return NULL; + return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&block->resource_list); block->afa = mlxsw_afa; @@ -344,7 +344,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) mlxsw_afa_set_destroy(block->first_set); err_first_set_create: kfree(block); - return NULL; + return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(mlxsw_afa_block_create); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c index 8ca77f3e8f279f1ada24240042c793c460ffabe0..ffd4b055fead73d5d7c4dc5519ae739074631173 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -88,8 +88,8 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv, * to be written using PEFA register to all indexes for all regions. 
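The mlxsw hunks around here make mlxsw_afa_block_create() return an encoded error pointer instead of NULL, so callers such as the spectrum ACL/TCAM code can propagate the real errno via PTR_ERR() rather than assuming -ENOMEM. A compact illustration of the error-pointer convention, using simplified userspace stand-ins for the linux/err.h helpers:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's linux/err.h helpers. */
#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct afa_block { int dummy; };

static struct afa_block *block_create(int simulate_oom)
{
        struct afa_block *block;

        if (simulate_oom)
                return ERR_PTR(-ENOMEM);        /* encode the errno in the pointer */

        block = calloc(1, sizeof(*block));
        if (!block)
                return ERR_PTR(-ENOMEM);
        return block;
}

static int caller(int simulate_oom)
{
        struct afa_block *block = block_create(simulate_oom);

        if (IS_ERR(block))
                return PTR_ERR(block);          /* propagate the original error */
        free(block);
        return 0;
}

int main(void)
{
        printf("ok path: %d, error path: %d\n", caller(0), caller(1));
        return 0;
}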
*/ afa_block = mlxsw_afa_block_create(mlxsw_sp->afa); - if (!afa_block) { - err = -ENOMEM; + if (IS_ERR(afa_block)) { + err = PTR_ERR(afa_block); goto err_afa_block; } err = mlxsw_afa_block_continue(afa_block); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index c4f9238591e6ef1c338a7434dfaa4f1701ea4106..c99f5542da1ed1c28de97e24ca48bb8fbda4855a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -442,7 +442,8 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) rulei = kzalloc(sizeof(*rulei), GFP_KERNEL); if (!rulei) - return NULL; + return ERR_PTR(-ENOMEM); + rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa); if (IS_ERR(rulei->act_block)) { err = PTR_ERR(rulei->act_block); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 8d211972c5e90fbe1a24dc32edf6f371ff018bd4..9f4eb3cde93e293df83f8e356d5a1c02c76dd946 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -98,9 +98,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, u8 prio = tcf_vlan_push_prio(a); u16 vid = tcf_vlan_push_vid(a); - return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei, - action, vid, - proto, prio, extack); + err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei, + action, vid, + proto, prio, extack); + if (err) + return err; } else { NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index 346f4a5fe053bc186008ebbf789f5d8f0919e1fa..221aa6a474eb104ce5be1591e91d134dc4aeb68b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -199,8 +199,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp, int err; afa_block = mlxsw_afa_block_create(mlxsw_sp->afa); - if (!afa_block) - return ERR_PTR(-ENOMEM); + if (IS_ERR(afa_block)) + return afa_block; err = mlxsw_afa_block_append_allocated_counter(afa_block, counter_index); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h index d743a37a3cee8db94501ebe27bce03612df23be1..e5dda2c27f187c639af06cb28a2fb924234107f1 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h @@ -2065,7 +2065,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \ (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\ if ((mask & VXGE_DEBUG_MASK) == mask) \ - printk(fmt "\n", __VA_ARGS__); \ + printk(fmt "\n", ##__VA_ARGS__); \ } while (0) #else #define vxge_debug_ll(level, mask, fmt, ...) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h index 59a57ff5e96aff0940c06306b380a411691b1b68..9c86f4f9cd4242d764d0c8166e80d86274e07487 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h @@ -452,49 +452,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); #if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK) #define vxge_debug_ll_config(level, fmt, ...) 
\ - vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__) #else #define vxge_debug_ll_config(level, fmt, ...) #endif #if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) #define vxge_debug_init(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__) #else #define vxge_debug_init(level, fmt, ...) #endif #if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK) #define vxge_debug_tx(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__) #else #define vxge_debug_tx(level, fmt, ...) #endif #if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK) #define vxge_debug_rx(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__) #else #define vxge_debug_rx(level, fmt, ...) #endif #if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK) #define vxge_debug_mem(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__) #else #define vxge_debug_mem(level, fmt, ...) #endif #if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK) #define vxge_debug_entryexit(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__) #else #define vxge_debug_entryexit(level, fmt, ...) #endif #if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK) #define vxge_debug_intr(level, fmt, ...) \ - vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__) + vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__) #else #define vxge_debug_intr(level, fmt, ...) #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index a6a9688db307f50750ef5103a3fa612f48b2e94f..e50fc8f714dcf6cb3f46e92d5c551532459000a4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3316,26 +3316,20 @@ static void qed_chain_free_single(struct qed_dev *cdev, static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) { - void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; + struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl; u32 page_cnt = p_chain->page_cnt, i, pbl_size; - u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table; - if (!pp_virt_addr_tbl) + if (!pp_addr_tbl) return; - if (!p_pbl_virt) - goto out; - for (i = 0; i < page_cnt; i++) { - if (!pp_virt_addr_tbl[i]) + if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map) break; dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, - pp_virt_addr_tbl[i], - *(dma_addr_t *)p_pbl_virt); - - p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; + pp_addr_tbl[i].virt_addr, + pp_addr_tbl[i].dma_map); } pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; @@ -3345,9 +3339,9 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) pbl_size, p_chain->pbl_sp.p_virt_table, p_chain->pbl_sp.p_phys_table); -out: - vfree(p_chain->pbl.pp_virt_addr_tbl); - p_chain->pbl.pp_virt_addr_tbl = NULL; + + vfree(p_chain->pbl.pp_addr_tbl); + p_chain->pbl.pp_addr_tbl = NULL; } void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain) @@ -3448,19 +3442,19 @@ qed_chain_alloc_pbl(struct qed_dev *cdev, { u32 page_cnt = p_chain->page_cnt, size, i; dma_addr_t p_phys = 0, p_pbl_phys = 0; - void **pp_virt_addr_tbl = NULL; + struct addr_tbl_entry *pp_addr_tbl; u8 *p_pbl_virt = NULL; void *p_virt = NULL; - size = page_cnt * 
sizeof(*pp_virt_addr_tbl); - pp_virt_addr_tbl = vzalloc(size); - if (!pp_virt_addr_tbl) + size = page_cnt * sizeof(*pp_addr_tbl); + pp_addr_tbl = vzalloc(size); + if (!pp_addr_tbl) return -ENOMEM; /* The allocation of the PBL table is done with its full size, since it * is expected to be successive. * qed_chain_init_pbl_mem() is called even in a case of an allocation - * failure, since pp_virt_addr_tbl was previously allocated, and it + * failure, since tbl was previously allocated, and it * should be saved to allow its freeing during the error flow. */ size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; @@ -3474,8 +3468,7 @@ qed_chain_alloc_pbl(struct qed_dev *cdev, p_chain->b_external_pbl = true; } - qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, - pp_virt_addr_tbl); + qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl); if (!p_pbl_virt) return -ENOMEM; @@ -3494,7 +3487,8 @@ qed_chain_alloc_pbl(struct qed_dev *cdev, /* Fill the PBL table with the physical address of the page */ *(dma_addr_t *)p_pbl_virt = p_phys; /* Keep the virtual address of the page */ - p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; + p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt; + p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys; p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 07f9067affc65ea4d73543c18016174217c1971a..cda5b0a9e9489b71c19871c3193332a88127771b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1720,7 +1720,7 @@ static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_d ahw->reset.seq_error = 0; ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL); - if (p_dev->ahw->reset.buff == NULL) + if (ahw->reset.buff == NULL) return -ENOMEM; p_buff = p_dev->ahw->reset.buff; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 37786affa975029536d9f3deea9033a304ee4af2..7389648d0feafd96516f66d72f60534153dc0250 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -288,7 +288,6 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], { struct rmnet_priv *priv = netdev_priv(dev); struct net_device *real_dev; - struct rmnet_endpoint *ep; struct rmnet_port *port; u16 mux_id; @@ -303,19 +302,27 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], if (data[IFLA_RMNET_MUX_ID]) { mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); - if (rmnet_get_endpoint(port, mux_id)) { - NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists"); - return -EINVAL; - } - ep = rmnet_get_endpoint(port, priv->mux_id); - if (!ep) - return -ENODEV; - hlist_del_init_rcu(&ep->hlnode); - hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); + if (mux_id != priv->mux_id) { + struct rmnet_endpoint *ep; + + ep = rmnet_get_endpoint(port, priv->mux_id); + if (!ep) + return -ENODEV; - ep->mux_id = mux_id; - priv->mux_id = mux_id; + if (rmnet_get_endpoint(port, mux_id)) { + NL_SET_ERR_MSG_MOD(extack, + "MUX ID already exists"); + return -EINVAL; + } + + hlist_del_init_rcu(&ep->hlnode); + hlist_add_head_rcu(&ep->hlnode, + &port->muxed_ep[mux_id]); + + ep->mux_id = mux_id; + priv->mux_id = mux_id; + } } if (data[IFLA_RMNET_FLAGS]) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 0a17535f13ae4313686fd478eb3c0144f4321ab3..03bda2e0b7a892ca36b5236e4e8c497d1a67b734 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -125,6 +125,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) { .div = 5, .val = 5, }, { .div = 6, .val = 6, }, { .div = 7, .val = 7, }, + { /* end of array */ } }; clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 5b3b06a0a3bf53e1eac9572ae8d14add0c3835e7..33407df6bea693b44903734f939b0bd83e1e12f2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -274,16 +274,19 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) phymode == PHY_INTERFACE_MODE_MII || phymode == PHY_INTERFACE_MODE_GMII || phymode == PHY_INTERFACE_MODE_SGMII) { - ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG, &module); module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2)); regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG, module); - } else { - ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2)); } + if (dwmac->f2h_ptp_ref_clk) + ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); + else + ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << + (reg_shift / 2)); + regmap_write(sys_mgr_base_addr, reg_offset, ctrl); /* Deassert reset for the phy configuration to be sampled by diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index fc1fa0f9f338713da5c6fcfdd19062ced459a91f..57694eada9955f774e72360d4782713dff2013a8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -155,6 +155,8 @@ static int sun7i_gmac_probe(struct platform_device *pdev) plat_dat->init = sun7i_gmac_init; plat_dat->exit = sun7i_gmac_exit; plat_dat->fix_mac_speed = sun7i_fix_speed; + plat_dat->tx_fifo_size = 4096; + plat_dat->rx_fifo_size = 16384; ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 7b2a84320aabd2d15fa13e58d463a83b35871dec..e4e9a7591efe92fb1b762dbca45c83b2c0fc4ff2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -218,7 +218,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw, reg++; } - while (reg <= perfect_addr_number) { + while (reg < perfect_addr_number) { writel(0, ioaddr + GMAC_ADDR_HIGH(reg)); writel(0, ioaddr + GMAC_ADDR_LOW(reg)); reg++; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 7423262ce5907f9d05ac6e44025c144c86e89618..e1fbd7c81bfa991db3613cf8371f32695cdf1513 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -36,12 +36,16 @@ static void config_sub_second_increment(void __iomem *ioaddr, unsigned long data; u32 reg_value; - /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second - * formula = (1/ptp_clock) * 1000000000 - * where ptp_clock is 50MHz if fine method is used to update system + 
/* For GMAC3.x, 4.x versions, in "fine adjustement mode" set sub-second + * increment to twice the number of nanoseconds of a clock cycle. + * The calculation of the default_addend value by the caller will set it + * to mid-range = 2^31 when the remainder of this division is zero, + * which will make the accumulator overflow once every 2 ptp_clock + * cycles, adding twice the number of nanoseconds of a clock cycle : + * 2000000000ULL / ptp_clock. */ if (value & PTP_TCR_TSCFUPDT) - data = (1000000000ULL / 50000000); + data = (2000000000ULL / ptp_clock); else data = (1000000000ULL / ptp_clock); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index ff83408733d45115d6d87589314ce6de4bd979eb..36444de701cd9f4f17e132d7b25413032374e0a0 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -801,7 +801,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, if (dst) return dst; } - if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) { + dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6, + NULL); + if (IS_ERR(dst)) { netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr); return ERR_PTR(-ENETUNREACH); } diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index df7d6de7c59cd38cb254fe8c8d90003dfbcacb1d..4ad3b877e5fd9afff1d28aca975cee4d9ec32dde 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1313,7 +1313,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) struct crypto_aead *tfm; int ret; - tfm = crypto_alloc_aead("gcm(aes)", 0, 0); + /* Pick a sync gcm(aes) cipher to ensure order is preserved. */ + tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) return tfm; @@ -3238,11 +3239,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev, struct netlink_ext_ack *extack) { struct macsec_dev *macsec = macsec_priv(dev); + rx_handler_func_t *rx_handler; + u8 icv_len = DEFAULT_ICV_LEN; struct net_device *real_dev; - int err; + int err, mtu; sci_t sci; - u8 icv_len = DEFAULT_ICV_LEN; - rx_handler_func_t *rx_handler; if (!tb[IFLA_LINK]) return -EINVAL; @@ -3258,7 +3259,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (data && data[IFLA_MACSEC_ICV_LEN]) icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); - dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true); + mtu = real_dev->mtu - icv_len - macsec_extra_len(true); + if (mtu < 0) + dev->mtu = 0; + else + dev->mtu = mtu; rx_handler = rtnl_dereference(real_dev->rx_handler); if (rx_handler && rx_handler != macsec_handle_frame) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a8f338dc0dfa177b4bb2af9a1f5d68f621f13033..225bfc80811225e5a334950c95c5578c2ae678a1 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1676,7 +1676,7 @@ static int macvlan_device_event(struct notifier_block *unused, struct macvlan_dev, list); - if (macvlan_sync_address(vlan->dev, dev->dev_addr)) + if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr)) return NOTIFY_BAD; break; diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 59b3f1fbabd4f1d95e84e84bf208e7516bc98498..4fed778331573de99b73b2bdc954af6051ebe469 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1114,7 +1114,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus) goto out; } dp83640_clock_init(clock, bus); - list_add_tail(&phyter_clocks, &clock->list); + list_add_tail(&clock->list, &phyter_clocks); out: mutex_unlock(&phyter_clocks_lock); diff 
--git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index b4c67c3a928b5515f4bb22dda357c42feb4507cd..55caaaf969da5dee6c30a28110c0aac77b0275f0 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -29,6 +29,7 @@ #include #include #include +#include /* Operation Mode Strap Override */ #define MII_KSZPHY_OMSO 0x16 @@ -738,6 +739,12 @@ static int kszphy_resume(struct phy_device *phydev) genphy_resume(phydev); + /* After switching from power-down to normal mode, an internal global + * reset is automatically generated. Wait a minimum of 1 ms before + * read/write access to the PHY registers. + */ + usleep_range(1000, 2000); + ret = kszphy_config_reset(phydev); if (ret) return ret; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 95524c06e64c21e4062a9f4ec8bc1a85e9416b85..53d9562a8818b8bb92136ea38abbf01d4f8fe092 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -475,6 +475,9 @@ static const struct team_mode *team_mode_get(const char *kind) struct team_mode_item *mitem; const struct team_mode *mode = NULL; + if (!try_module_get(THIS_MODULE)) + return NULL; + spin_lock(&mode_list_lock); mitem = __find_mode(kind); if (!mitem) { @@ -490,6 +493,7 @@ static const struct team_mode *team_mode_get(const char *kind) } spin_unlock(&mode_list_lock); + module_put(THIS_MODULE); return mode; } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index a7804def11207107fc826ff7533fe5ef36c6ca96..c8222cdf755ddd00b852b6e3eaed70cd2b4e3053 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1294,6 +1294,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 9f895083bc0aad15a2cadfca2fba43d3406f12a8..b55eeb8f8fa3a56fb945acad1b7eac7b03649191 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -192,8 +192,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, fl6.flowi6_proto = iph->nexthdr; fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF; - dst = ip6_route_output(net, NULL, &fl6); - if (dst == dst_null) + dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL); + if (IS_ERR(dst) || dst == dst_null) goto err; skb_dst_drop(skb); @@ -478,7 +478,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, if (rt6_need_strict(&ipv6_hdr(skb)->daddr)) return skb; - if (qdisc_tx_is_default(vrf_dev)) + if (qdisc_tx_is_default(vrf_dev) || + IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) return vrf_ip6_out_direct(vrf_dev, sk, skb); return vrf_ip6_out_redirect(vrf_dev, skb); @@ -692,7 +693,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, ipv4_is_lbcast(ip_hdr(skb)->daddr)) return skb; - if (qdisc_tx_is_default(vrf_dev)) + if (qdisc_tx_is_default(vrf_dev) || + IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) return vrf_ip_out_direct(vrf_dev, sk, skb); return vrf_ip_out_redirect(vrf_dev, skb); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 64751b089482bcedfbacba0a1301514b5c4e03eb..7ee0bad184662698c78b9009c69a38147a1a3c9c 
100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1963,7 +1963,6 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct dst_entry *ndst; struct flowi6 fl6; - int err; if (!sock6) return ERR_PTR(-EIO); @@ -1986,10 +1985,9 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, fl6.fl6_dport = dport; fl6.fl6_sport = sport; - err = ipv6_stub->ipv6_dst_lookup(vxlan->net, - sock6->sock->sk, - &ndst, &fl6); - if (unlikely(err < 0)) { + ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk, + &fl6, NULL); + if (unlikely(IS_ERR(ndst))) { netdev_dbg(dev, "no route to %pI6\n", daddr); return ERR_PTR(-ENETUNREACH); } diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c index 529ebca1e9e13eb94d4574a29172ec311e614e58..1f7709d24f3526a1498a9f5a2f85e79c71ab811f 100644 --- a/drivers/net/wimax/i2400m/usb-fw.c +++ b/drivers/net/wimax/i2400m/usb-fw.c @@ -354,6 +354,7 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m, usb_autopm_put_interface(i2400mu->usb_iface); d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %ld\n", i2400m, ack, ack_size, (long) result); + usb_put_urb(¬if_urb); return result; error_exceeded: diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 74f98bbaea889bae3b078c4ae7b3c968b74e7f9e..3e92b88045a53cc30f3407782f1f64dd00680c2d 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1457,6 +1457,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef); } + if (changed & IEEE80211_CONF_CHANGE_POWER) + ath9k_set_txpower(sc, NULL); + mutex_unlock(&sc->mutex); ath9k_ps_restore(sc); diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 44296c015925200864bb91b3ebe361802015a70b..55a809cb31054467af0cc54af1e9bc49e50e51a8 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -730,32 +730,6 @@ struct dentry *wil_debugfs_create_ioblob(const char *name, return debugfs_create_file(name, mode, parent, wil_blob, &fops_ioblob); } -/*---reset---*/ -static ssize_t wil_write_file_reset(struct file *file, const char __user *buf, - size_t len, loff_t *ppos) -{ - struct wil6210_priv *wil = file->private_data; - struct net_device *ndev = wil->main_ndev; - - /** - * BUG: - * this code does NOT sync device state with the rest of system - * use with care, debug only!!! 
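As with the slcan slc_bump() change earlier in this diff, the memset(&params, 0, sizeof(params)) added to wil_write_file_txmgmt() a little further down zeroes the whole parameter block before only a subset of its fields is filled in, so no stack garbage leaks into the untouched members. A tiny standalone example of that pattern (the struct and field names are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical parameter block: the parser fills in only the fields it
 * understands, so everything else must start out as zero.
 */
struct tx_params {
        unsigned int id;
        uint8_t len;
        uint8_t flags;          /* never written by parse_cmd() */
        uint8_t data[8];
};

static int parse_cmd(const char *cmd, struct tx_params *p)
{
        memset(p, 0, sizeof(*p));       /* clear everything, including padding */

        if (sscanf(cmd, "%x %hhu", &p->id, &p->len) != 2)
                return -1;
        return 0;
}

int main(void)
{
        struct tx_params p;

        if (parse_cmd("12345678 4", &p) == 0)
                printf("id=%x len=%d flags=%d\n", p.id, p.len, p.flags);
        return 0;
}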
- */ - rtnl_lock(); - dev_close(ndev); - ndev->flags &= ~IFF_UP; - rtnl_unlock(); - wil_reset(wil, true); - - return len; -} - -static const struct file_operations fops_reset = { - .write = wil_write_file_reset, - .open = simple_open, -}; - /*---write channel 1..4 to rxon for it, 0 to rxoff---*/ static ssize_t wil_write_file_rxon(struct file *file, const char __user *buf, size_t len, loff_t *ppos) @@ -991,6 +965,8 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, int rc; void *frame; + memset(¶ms, 0, sizeof(params)); + if (!len) return -EINVAL; @@ -2459,7 +2435,6 @@ static const struct { {"desc", 0444, &fops_txdesc}, {"bf", 0444, &fops_bf}, {"mem_val", 0644, &fops_memread}, - {"reset", 0244, &fops_reset}, {"rxon", 0244, &fops_rxon}, {"tx_mgmt", 0244, &fops_txmgmt}, {"wmi_send", 0244, &fops_wmi}, diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 0655cd8845142c244a260252d01b3d74e003d062..d161dc930313d2ec0d67cd1bd5839d4fa7a2b2c8 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -590,10 +590,14 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) } if (isr & BIT_DMA_EP_MISC_ICR_HALP) { - wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n"); - wil6210_mask_halp(wil); isr &= ~BIT_DMA_EP_MISC_ICR_HALP; - complete(&wil->halp.comp); + if (wil->halp.handle_icr) { + /* no need to handle HALP ICRs until next vote */ + wil->halp.handle_icr = false; + wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n"); + wil6210_mask_halp(wil); + complete(&wil->halp.comp); + } } wil->isr_misc = isr; diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 10673fa9388ec0b266d4963f8e008fe7dc27c8ee..fe91db3478dc8446fb139ac158bafc7e758f199b 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1687,7 +1687,7 @@ int __wil_up(struct wil6210_priv *wil) return rc; /* Rx RING. 
After MAC and beacon */ - rc = wil->txrx_ops.rx_init(wil, 1 << rx_ring_order); + rc = wil->txrx_ops.rx_init(wil, rx_ring_order); if (rc) return rc; @@ -1814,11 +1814,14 @@ void wil_halp_vote(struct wil6210_priv *wil) if (++wil->halp.ref_cnt == 1) { reinit_completion(&wil->halp.comp); + /* mark to IRQ context to handle HALP ICR */ + wil->halp.handle_icr = true; wil6210_set_halp(wil); rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies); if (!rc) { wil_err(wil, "HALP vote timed out\n"); /* Mask HALP as done in case the interrupt is raised */ + wil->halp.handle_icr = false; wil6210_mask_halp(wil); } else { wil_dbg_irq(wil, diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 73cdf54521f9bb8dad9c7506acc9df70b121ca05..236dcb6f5e84da7e3a440f8168809632ac30af47 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -881,7 +881,7 @@ static void wil_rx_buf_len_init(struct wil6210_priv *wil) } } -static int wil_rx_init(struct wil6210_priv *wil, u16 size) +static int wil_rx_init(struct wil6210_priv *wil, uint order) { struct wil_ring *vring = &wil->ring_rx; int rc; @@ -895,7 +895,7 @@ static int wil_rx_init(struct wil6210_priv *wil, u16 size) wil_rx_buf_len_init(wil); - vring->size = size; + vring->size = 1 << order; vring->is_rx = true; rc = wil_vring_alloc(wil, vring); if (rc) diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index 5fa8d6ad66482641be406201d091e4564bd22b4b..fe666a3583c105063b3e4cbac64eb59effc5688f 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -268,6 +268,9 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil, struct list_head *active = &wil->rx_buff_mgmt.active; dma_addr_t pa; + if (!wil->rx_buff_mgmt.buff_arr) + return; + while (!list_empty(active)) { struct wil_rx_buff *rx_buff = list_first_entry(active, struct wil_rx_buff, list); @@ -590,9 +593,9 @@ static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil) WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT; } -static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size) +static int wil_rx_init_edma(struct wil6210_priv *wil, uint desc_ring_order) { - u16 status_ring_size; + u16 status_ring_size, desc_ring_size = 1 << desc_ring_order; struct wil_ring *ring = &wil->ring_rx; int rc; size_t elem_size = wil->use_compressed_rx_status ? 
@@ -607,7 +610,12 @@ static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size) "compressed RX status cannot be used with SW reorder\n"); return -EINVAL; } - + if (wil->rx_status_ring_order <= desc_ring_order) + /* make sure sring is larger than desc ring */ + wil->rx_status_ring_order = desc_ring_order + 1; + if (wil->rx_buff_id_count <= desc_ring_size) + /* make sure we will not run out of buff_ids */ + wil->rx_buff_id_count = desc_ring_size + 512; if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN || wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX) wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 75fe1a3b70466165db6865957880bb7ccc613b6f..bc89044d0b66e520b8eedd47b45a46b74bba32f1 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -602,7 +602,7 @@ struct wil_txrx_ops { struct wil_ring *ring, struct sk_buff *skb); irqreturn_t (*irq_tx)(int irq, void *cookie); /* RX ops */ - int (*rx_init)(struct wil6210_priv *wil, u16 ring_size); + int (*rx_init)(struct wil6210_priv *wil, uint ring_order); void (*rx_fini)(struct wil6210_priv *wil); int (*wmi_addba_rx_resp)(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u8 token, u16 status, bool amsdu, @@ -778,6 +778,7 @@ struct wil_halp { struct mutex lock; /* protect halp ref_cnt */ unsigned int ref_cnt; struct completion comp; + u8 handle_icr; }; struct wil_blob_wrapper { diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 8a603432f531786abdfbaeed583fa25facda79fb..3928b13ae0266d6b3a00217fe594d6bf32adc17d 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -2802,7 +2802,7 @@ static void wmi_event_handle(struct wil6210_priv *wil, if (mid == MID_BROADCAST) mid = 0; - if (mid >= wil->max_vifs) { + if (mid >= ARRAY_SIZE(wil->vifs) || mid >= wil->max_vifs) { wil_dbg_wmi(wil, "invalid mid %d, event skipped\n", mid); return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index e6a67bc022090a52f9000a43df05cdef6197b971..bdb87d8e96445c5b4759af8691c0f3c645a8b20f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -587,6 +587,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_mvm_stat_data { struct iwl_mvm *mvm; + __le32 flags; __le32 mac_id; u8 beacon_filter_average_energy; void *general; @@ -630,6 +631,13 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, } } + /* make sure that beacon statistics don't go backwards with TCM + * request to clear statistics + */ + if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) + mvmvif->beacon_stats.accu_num_beacons += + mvmvif->beacon_stats.num_beacons; + if (mvmvif->id != id) return; @@ -790,6 +798,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, flags = stats->flag; } + data.flags = flags; iwl_mvm_rx_stats_check_trigger(mvm, pkt); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 7b1dff92b7094ff10f8e04fb59d4ea54ee07b87c..93f396d7e6849762d817964c656e477d62e76f9c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -1231,6 +1231,9 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) 
iwl_pcie_gen2_txq_unmap(trans, queue); + iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]); + trans_pcie->txq[queue] = NULL; + IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index ce2dd06af62e8b987a027d7cb097b08a5147e258..3564f5869b444a7217705f9cde503548bacbd0e2 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3327,9 +3327,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) param.no_vif = true; if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { - hwname = kasprintf(GFP_KERNEL, "%.*s", - nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), - (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); + hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]), + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), + GFP_KERNEL); if (!hwname) return -ENOMEM; param.hwname = hwname; @@ -3385,9 +3385,9 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) if (info->attrs[HWSIM_ATTR_RADIO_ID]) { idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { - hwname = kasprintf(GFP_KERNEL, "%.*s", - nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), - (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); + hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]), + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), + GFP_KERNEL); if (!hwname) return -ENOMEM; } else diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c index 6a7b53ff18d753d135528deb4215a8756b31882c..c9baf4eab9609da255bf516a5f03fb4cfd74c492 100644 --- a/drivers/net/wireless/virt_wifi.c +++ b/drivers/net/wireless/virt_wifi.c @@ -654,7 +654,7 @@ int virt_wifi_register_network_simulation priv->network_simulation = ops; return 0; } -EXPORT_SYMBOL(virt_wifi_register_network_simulation); +EXPORT_SYMBOL_GPL(virt_wifi_register_network_simulation); int virt_wifi_unregister_network_simulation(void) { @@ -664,7 +664,7 @@ int virt_wifi_unregister_network_simulation(void) priv->network_simulation = NULL; return 0; } -EXPORT_SYMBOL(virt_wifi_unregister_network_simulation); +EXPORT_SYMBOL_GPL(virt_wifi_unregister_network_simulation); module_init(virt_wifi_init_module); module_exit(virt_wifi_cleanup_module); diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 54a633e8cb5d21ef3d2e6afcede959ce1e1e3b06..48a070a37ea9b28e2d4a1126d7ef51f0a7655cab 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -984,8 +984,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, return -EFAULT; } - if (!desc || (desc->out_num + desc->in_num == 0) || - !test_bit(cmd, &cmd_mask)) + if (!desc || + (desc->out_num + desc->in_num == 0) || + cmd > ND_CMD_CALL || + !test_bit(cmd, &cmd_mask)) return -ENOTTY; /* fail write commands (when read-only) */ diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index a8132e8d72bb473352fbbdeec90094425be5edbd..d5359c7c811a68e41ce5a3302eaf6e97017af955 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2200,6 +2200,17 @@ static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) lockdep_assert_held(&nvme_subsystems_lock); + /* + * Fail matches for discovery subsystems. This results + * in each discovery controller bound to a unique subsystem. + * This avoids issues with validating controller values + * that can only be true when there is a single unique subsystem. 
+ * There may be multiple and completely independent entities + * that provide discovery controllers. + */ + if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) + return NULL; + list_for_each_entry(subsys, &nvme_subsystems, entry) { if (strcmp(subsys->subnqn, subsysnqn)) continue; diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 1875f6b8a907b9fcace10d4e21806fe95dcfa432..ed43b06353a391849d68aa91eedd72ae2ef84f1e 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -342,8 +342,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, !template->ls_req || !template->fcp_io || !template->ls_abort || !template->fcp_abort || !template->max_hw_queues || !template->max_sgl_segments || - !template->max_dif_sgl_segments || !template->dma_boundary || - !template->module) { + !template->max_dif_sgl_segments || !template->dma_boundary) { ret = -EINVAL; goto out_reghost_failed; } @@ -1987,7 +1986,6 @@ nvme_fc_ctrl_free(struct kref *ref) { struct nvme_fc_ctrl *ctrl = container_of(ref, struct nvme_fc_ctrl, ref); - struct nvme_fc_lport *lport = ctrl->lport; unsigned long flags; if (ctrl->ctrl.tagset) { @@ -2013,7 +2011,6 @@ nvme_fc_ctrl_free(struct kref *ref) if (ctrl->ctrl.opts) nvmf_free_options(ctrl->ctrl.opts); kfree(ctrl); - module_put(lport->ops->module); } static void @@ -3055,15 +3052,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, goto out_fail; } - if (!try_module_get(lport->ops->module)) { - ret = -EUNATCH; - goto out_free_ctrl; - } - idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); if (idx < 0) { ret = -ENOSPC; - goto out_mod_put; + goto out_free_ctrl; } ctrl->ctrl.opts = opts; @@ -3205,8 +3197,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, out_free_ida: put_device(ctrl->dev); ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); -out_mod_put: - module_put(lport->ops->module); out_free_ctrl: kfree(ctrl); out_fail: diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index e8bc25aed44ca6653eb0ccc53345dd65d410bcba..588864beabd8016bb5336bd24269dc2bcef00130 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -402,7 +402,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, if (!nr_nsids) return 0; - down_write(&ctrl->namespaces_rwsem); + down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { unsigned nsid = le32_to_cpu(desc->nsids[n]); @@ -413,7 +413,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, if (++n == nr_nsids) break; } - up_write(&ctrl->namespaces_rwsem); + up_read(&ctrl->namespaces_rwsem); return 0; } diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index e4f167e35353f24badbb3af48aea981ba08c6834..9711bfbdf43169ef0641595a1e329898ff4f63fa 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -815,9 +815,11 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, if (new) nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); out_free_async_qe: - nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, - sizeof(struct nvme_command), DMA_TO_DEVICE); - ctrl->async_event_sqe.data = NULL; + if (ctrl->async_event_sqe.data) { + nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, + sizeof(struct nvme_command), DMA_TO_DEVICE); + ctrl->async_event_sqe.data = NULL; + } out_free_queue: nvme_rdma_free_queue(&ctrl->queues[0]); return error; diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 
f0536d341f2f2a347c54216c971e824d8472648a..291f4121f516ad887f7f842761e92d3ab486196b 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -825,7 +825,6 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) #define FCLOOP_DMABOUND_4G 0xFFFFFFFF static struct nvme_fc_port_template fctemplate = { - .module = THIS_MODULE, .localport_delete = fcloop_localport_delete, .remoteport_delete = fcloop_remoteport_delete, .create_queue = fcloop_create_queue, diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index 0a7a470ee8594e14ecf905d683179d6ac97c105a..bd881c4e816d185055f61b5f8e3c0d8ce6bda68d 100644 --- a/drivers/nvmem/Kconfig +++ b/drivers/nvmem/Kconfig @@ -13,6 +13,16 @@ menuconfig NVMEM if NVMEM +config NVMEM_SYSFS + bool "/sys/bus/nvmem/devices/*/nvmem (sysfs interface)" + depends on SYSFS + default y + help + Say Y here to add a sysfs interface for NVMEM. + + This interface is mostly used by userspace applications to + read/write directly into nvmem. + config NVMEM_IMX_IIM tristate "i.MX IC Identification Module support" depends on ARCH_MXC || COMPILE_TEST diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile index 4e8c61628f1a20eb6b0794fef54e6ba25f5231f5..1847ec17e97da08c922cf69b9d59c4c8c3c868b5 100644 --- a/drivers/nvmem/Makefile +++ b/drivers/nvmem/Makefile @@ -6,6 +6,9 @@ obj-$(CONFIG_NVMEM) += nvmem_core.o nvmem_core-y := core.o +obj-$(CONFIG_NVMEM_SYSFS) += nvmem_sysfs.o +nvmem_sysfs-y := nvmem-sysfs.o + # Devices obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o nvmem-bcm-ocotp-y := bcm-ocotp.o diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 148f60eccfb71b0d629ba26e1117c8eb7f49f28b..f40ef8ea6ab50baa44eeaee7db3963016cfffa17 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -19,31 +19,14 @@ #include #include #include +#include #include #include #include #include #include -struct nvmem_device { - const char *name; - struct module *owner; - struct device dev; - int stride; - int word_size; - int id; - int users; - size_t size; - bool read_only; - int flags; - struct bin_attribute eeprom; - struct device *base_dev; - nvmem_reg_read_t reg_read; - nvmem_reg_write_t reg_write; - void *priv; -}; - -#define FLAG_COMPAT BIT(0) +#include "nvmem.h" struct nvmem_cell { const char *name; @@ -59,14 +42,9 @@ struct nvmem_cell { static DEFINE_MUTEX(nvmem_mutex); static DEFINE_IDA(nvmem_ida); -static LIST_HEAD(nvmem_cells); -static DEFINE_MUTEX(nvmem_cells_mutex); +static DEFINE_MUTEX(nvmem_cell_mutex); +static LIST_HEAD(nvmem_cell_tables); -#ifdef CONFIG_DEBUG_LOCK_ALLOC -static struct lock_class_key eeprom_lock_key; -#endif - -#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset, void *val, size_t bytes) { @@ -85,168 +63,6 @@ static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset, return -EINVAL; } -static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t pos, size_t count) -{ - struct device *dev; - struct nvmem_device *nvmem; - int rc; - - if (attr->private) - dev = attr->private; - else - dev = container_of(kobj, struct device, kobj); - nvmem = to_nvmem_device(dev); - - /* Stop the user from reading */ - if (pos >= nvmem->size) - return 0; - - if (count < nvmem->word_size) - return -EINVAL; - - if (pos + count > nvmem->size) - count = nvmem->size - pos; - - count = round_down(count, nvmem->word_size); - - rc = nvmem_reg_read(nvmem, pos, buf, 
count); - - if (rc) - return rc; - - return count; -} - -static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t pos, size_t count) -{ - struct device *dev; - struct nvmem_device *nvmem; - int rc; - - if (attr->private) - dev = attr->private; - else - dev = container_of(kobj, struct device, kobj); - nvmem = to_nvmem_device(dev); - - /* Stop the user from writing */ - if (pos >= nvmem->size) - return -EFBIG; - - if (count < nvmem->word_size) - return -EINVAL; - - if (pos + count > nvmem->size) - count = nvmem->size - pos; - - count = round_down(count, nvmem->word_size); - - rc = nvmem_reg_write(nvmem, pos, buf, count); - - if (rc) - return rc; - - return count; -} - -/* default read/write permissions */ -static struct bin_attribute bin_attr_rw_nvmem = { - .attr = { - .name = "nvmem", - .mode = S_IWUSR | S_IRUGO, - }, - .read = bin_attr_nvmem_read, - .write = bin_attr_nvmem_write, -}; - -static struct bin_attribute *nvmem_bin_rw_attributes[] = { - &bin_attr_rw_nvmem, - NULL, -}; - -static const struct attribute_group nvmem_bin_rw_group = { - .bin_attrs = nvmem_bin_rw_attributes, -}; - -static const struct attribute_group *nvmem_rw_dev_groups[] = { - &nvmem_bin_rw_group, - NULL, -}; - -/* read only permission */ -static struct bin_attribute bin_attr_ro_nvmem = { - .attr = { - .name = "nvmem", - .mode = S_IRUGO, - }, - .read = bin_attr_nvmem_read, -}; - -static struct bin_attribute *nvmem_bin_ro_attributes[] = { - &bin_attr_ro_nvmem, - NULL, -}; - -static const struct attribute_group nvmem_bin_ro_group = { - .bin_attrs = nvmem_bin_ro_attributes, -}; - -static const struct attribute_group *nvmem_ro_dev_groups[] = { - &nvmem_bin_ro_group, - NULL, -}; - -/* default read/write permissions, root only */ -static struct bin_attribute bin_attr_rw_root_nvmem = { - .attr = { - .name = "nvmem", - .mode = S_IWUSR | S_IRUSR, - }, - .read = bin_attr_nvmem_read, - .write = bin_attr_nvmem_write, -}; - -static struct bin_attribute *nvmem_bin_rw_root_attributes[] = { - &bin_attr_rw_root_nvmem, - NULL, -}; - -static const struct attribute_group nvmem_bin_rw_root_group = { - .bin_attrs = nvmem_bin_rw_root_attributes, -}; - -static const struct attribute_group *nvmem_rw_root_dev_groups[] = { - &nvmem_bin_rw_root_group, - NULL, -}; - -/* read only permission, root only */ -static struct bin_attribute bin_attr_ro_root_nvmem = { - .attr = { - .name = "nvmem", - .mode = S_IRUSR, - }, - .read = bin_attr_nvmem_read, -}; - -static struct bin_attribute *nvmem_bin_ro_root_attributes[] = { - &bin_attr_ro_root_nvmem, - NULL, -}; - -static const struct attribute_group nvmem_bin_ro_root_group = { - .bin_attrs = nvmem_bin_ro_root_attributes, -}; - -static const struct attribute_group *nvmem_ro_root_dev_groups[] = { - &nvmem_bin_ro_root_group, - NULL, -}; - static void nvmem_release(struct device *dev) { struct nvmem_device *nvmem = to_nvmem_device(dev); @@ -283,49 +99,28 @@ static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np) return to_nvmem_device(d); } -static struct nvmem_cell *nvmem_find_cell(const char *cell_id) -{ - struct nvmem_cell *p; - - mutex_lock(&nvmem_cells_mutex); - - list_for_each_entry(p, &nvmem_cells, node) - if (!strcmp(p->name, cell_id)) { - mutex_unlock(&nvmem_cells_mutex); - return p; - } - - mutex_unlock(&nvmem_cells_mutex); - - return NULL; -} - static void nvmem_cell_drop(struct nvmem_cell *cell) { - mutex_lock(&nvmem_cells_mutex); + mutex_lock(&nvmem_mutex); list_del(&cell->node); - 
mutex_unlock(&nvmem_cells_mutex); + mutex_unlock(&nvmem_mutex); of_node_put(cell->np); kfree(cell); } static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem) { - struct nvmem_cell *cell; - struct list_head *p, *n; + struct nvmem_cell *cell, *p; - list_for_each_safe(p, n, &nvmem_cells) { - cell = list_entry(p, struct nvmem_cell, node); - if (cell->nvmem == nvmem) - nvmem_cell_drop(cell); - } + list_for_each_entry_safe(cell, p, &nvmem->cells, node) + nvmem_cell_drop(cell); } static void nvmem_cell_add(struct nvmem_cell *cell) { - mutex_lock(&nvmem_cells_mutex); - list_add_tail(&cell->node, &nvmem_cells); - mutex_unlock(&nvmem_cells_mutex); + mutex_lock(&nvmem_mutex); + list_add_tail(&cell->node, &cell->nvmem->cells); + mutex_unlock(&nvmem_mutex); } static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem, @@ -404,48 +199,41 @@ int nvmem_add_cells(struct nvmem_device *nvmem, } EXPORT_SYMBOL_GPL(nvmem_add_cells); -/* - * nvmem_setup_compat() - Create an additional binary entry in - * drivers sys directory, to be backwards compatible with the older - * drivers/misc/eeprom drivers. - */ -static int nvmem_setup_compat(struct nvmem_device *nvmem, - const struct nvmem_config *config) +static int nvmem_add_cells_from_table(struct nvmem_device *nvmem) { - int rval; - - if (!config->base_dev) - return -EINVAL; - - if (nvmem->read_only) { - if (config->root_only) - nvmem->eeprom = bin_attr_ro_root_nvmem; - else - nvmem->eeprom = bin_attr_ro_nvmem; - } else { - if (config->root_only) - nvmem->eeprom = bin_attr_rw_root_nvmem; - else - nvmem->eeprom = bin_attr_rw_nvmem; - } - nvmem->eeprom.attr.name = "eeprom"; - nvmem->eeprom.size = nvmem->size; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - nvmem->eeprom.attr.key = &eeprom_lock_key; -#endif - nvmem->eeprom.private = &nvmem->dev; - nvmem->base_dev = config->base_dev; - - rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom); - if (rval) { - dev_err(&nvmem->dev, - "Failed to create eeprom binary file %d\n", rval); - return rval; + const struct nvmem_cell_info *info; + struct nvmem_cell_table *table; + struct nvmem_cell *cell; + int rval = 0, i; + + mutex_lock(&nvmem_cell_mutex); + list_for_each_entry(table, &nvmem_cell_tables, node) { + if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) { + for (i = 0; i < table->ncells; i++) { + info = &table->cells[i]; + + cell = kzalloc(sizeof(*cell), GFP_KERNEL); + if (!cell) { + rval = -ENOMEM; + goto out; + } + + rval = nvmem_cell_info_to_nvmem_cell(nvmem, + info, + cell); + if (rval) { + kfree(cell); + goto out; + } + + nvmem_cell_add(cell); + } + } } - nvmem->flags |= FLAG_COMPAT; - - return 0; +out: + mutex_unlock(&nvmem_cell_mutex); + return rval; } static int nvmem_add_cells_from_of(struct nvmem_device *nvmem) @@ -528,6 +316,9 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) return ERR_PTR(rval); } + kref_init(&nvmem->refcnt); + INIT_LIST_HEAD(&nvmem->cells); + nvmem->id = rval; nvmem->owner = config->owner; if (!nvmem->owner && config->dev->driver) @@ -554,14 +345,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) nvmem->read_only = device_property_present(config->dev, "read-only") | config->read_only; - if (config->root_only) - nvmem->dev.groups = nvmem->read_only ? - nvmem_ro_root_dev_groups : - nvmem_rw_root_dev_groups; - else - nvmem->dev.groups = nvmem->read_only ? 
- nvmem_ro_dev_groups : - nvmem_rw_dev_groups; + nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config); device_initialize(&nvmem->dev); @@ -572,7 +356,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) goto err_put_device; if (config->compat) { - rval = nvmem_setup_compat(nvmem, config); + rval = nvmem_sysfs_setup_compat(nvmem, config); if (rval) goto err_device_del; } @@ -583,15 +367,21 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) goto err_teardown_compat; } + rval = nvmem_add_cells_from_table(nvmem); + if (rval) + goto err_remove_cells; + rval = nvmem_add_cells_from_of(nvmem); if (rval) - goto err_teardown_compat; + goto err_remove_cells; return nvmem; +err_remove_cells: + nvmem_device_remove_all_cells(nvmem); err_teardown_compat: if (config->compat) - device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); + nvmem_sysfs_remove_compat(nvmem, config); err_device_del: device_del(&nvmem->dev); err_put_device: @@ -601,6 +391,20 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) } EXPORT_SYMBOL_GPL(nvmem_register); +static void nvmem_device_release(struct kref *kref) +{ + struct nvmem_device *nvmem; + + nvmem = container_of(kref, struct nvmem_device, refcnt); + + if (nvmem->flags & FLAG_COMPAT) + device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); + + nvmem_device_remove_all_cells(nvmem); + device_del(&nvmem->dev); + put_device(&nvmem->dev); +} + /** * nvmem_unregister() - Unregister previously registered nvmem device * @@ -610,19 +414,7 @@ EXPORT_SYMBOL_GPL(nvmem_register); */ int nvmem_unregister(struct nvmem_device *nvmem) { - mutex_lock(&nvmem_mutex); - if (nvmem->users) { - mutex_unlock(&nvmem_mutex); - return -EBUSY; - } - mutex_unlock(&nvmem_mutex); - - if (nvmem->flags & FLAG_COMPAT) - device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); - - nvmem_device_remove_all_cells(nvmem); - device_del(&nvmem->dev); - put_device(&nvmem->dev); + kref_put(&nvmem->refcnt, nvmem_device_release); return 0; } @@ -695,52 +487,32 @@ static struct nvmem_device *__nvmem_device_get(struct device_node *np, { struct nvmem_device *nvmem = NULL; - mutex_lock(&nvmem_mutex); - - if (np) { - nvmem = of_nvmem_find(np); - if (!nvmem) { - mutex_unlock(&nvmem_mutex); - return ERR_PTR(-EPROBE_DEFER); - } - } else { - struct nvmem_cell *cell = nvmem_find_cell(cell_id); - - if (cell) { - nvmem = cell->nvmem; - *cellp = cell; - } - - if (!nvmem) { - mutex_unlock(&nvmem_mutex); - return ERR_PTR(-ENOENT); - } - } + if (!np) + return ERR_PTR(-ENOENT); - nvmem->users++; + mutex_lock(&nvmem_mutex); + nvmem = of_nvmem_find(np); mutex_unlock(&nvmem_mutex); + if (!nvmem) + return ERR_PTR(-EPROBE_DEFER); if (!try_module_get(nvmem->owner)) { dev_err(&nvmem->dev, "could not increase module refcount for cell %s\n", nvmem->name); - mutex_lock(&nvmem_mutex); - nvmem->users--; - mutex_unlock(&nvmem_mutex); - return ERR_PTR(-EINVAL); } + kref_get(&nvmem->refcnt); + return nvmem; } static void __nvmem_device_put(struct nvmem_device *nvmem) { module_put(nvmem->owner); - mutex_lock(&nvmem_mutex); - nvmem->users--; - mutex_unlock(&nvmem_mutex); + kref_put(&nvmem->refcnt, nvmem_device_release); } static struct nvmem_device *nvmem_find(const char *name) @@ -900,7 +672,7 @@ nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np) struct nvmem_cell *cell = NULL; mutex_lock(&nvmem_mutex); - list_for_each_entry(cell, &nvmem_cells, node) { + list_for_each_entry(cell, &nvmem->cells, node) { if (np == cell->np) break; } @@ -1390,6 +1162,45 
@@ int nvmem_device_write(struct nvmem_device *nvmem, } EXPORT_SYMBOL_GPL(nvmem_device_write); +/** + * nvmem_add_cell_table() - register a table of cell info entries + * + * @table: table of cell info entries + */ +void nvmem_add_cell_table(struct nvmem_cell_table *table) +{ + mutex_lock(&nvmem_cell_mutex); + list_add_tail(&table->node, &nvmem_cell_tables); + mutex_unlock(&nvmem_cell_mutex); +} +EXPORT_SYMBOL_GPL(nvmem_add_cell_table); + +/** + * nvmem_del_cell_table() - remove a previously registered cell info table + * + * @table: table of cell info entries + */ +void nvmem_del_cell_table(struct nvmem_cell_table *table) +{ + mutex_lock(&nvmem_cell_mutex); + list_del(&table->node); + mutex_unlock(&nvmem_cell_mutex); +} +EXPORT_SYMBOL_GPL(nvmem_del_cell_table); + +/** + * nvmem_dev_name() - Get the name of a given nvmem device. + * + * @nvmem: nvmem device. + * + * Return: name of the nvmem device. + */ +const char *nvmem_dev_name(struct nvmem_device *nvmem) +{ + return dev_name(&nvmem->dev); +} +EXPORT_SYMBOL_GPL(nvmem_dev_name); + static int __init nvmem_init(void) { return bus_register(&nvmem_bus_type); diff --git a/drivers/nvmem/nvmem-sysfs.c b/drivers/nvmem/nvmem-sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..978bacd18d0a180afc1b6b1dd67a92801c0e525a --- /dev/null +++ b/drivers/nvmem/nvmem-sysfs.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019, Linaro Limited + */ +#include "nvmem.h" + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +static struct lock_class_key eeprom_lock_key; +#endif + +static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t pos, size_t count) +{ + struct device *dev; + struct nvmem_device *nvmem; + int rc; + + if (attr->private) + dev = attr->private; + else + dev = container_of(kobj, struct device, kobj); + nvmem = to_nvmem_device(dev); + + /* Stop the user from reading */ + if (pos >= nvmem->size) + return 0; + + if (count < nvmem->word_size) + return -EINVAL; + + if (pos + count > nvmem->size) + count = nvmem->size - pos; + + count = round_down(count, nvmem->word_size); + + rc = nvmem->reg_read(nvmem->priv, pos, buf, count); + + if (rc) + return rc; + + return count; +} + +static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t pos, size_t count) +{ + struct device *dev; + struct nvmem_device *nvmem; + int rc; + + if (attr->private) + dev = attr->private; + else + dev = container_of(kobj, struct device, kobj); + nvmem = to_nvmem_device(dev); + + /* Stop the user from writing */ + if (pos >= nvmem->size) + return -EFBIG; + + if (count < nvmem->word_size) + return -EINVAL; + + if (pos + count > nvmem->size) + count = nvmem->size - pos; + + count = round_down(count, nvmem->word_size); + + rc = nvmem->reg_write(nvmem->priv, pos, buf, count); + + if (rc) + return rc; + + return count; +} + +/* default read/write permissions */ +static struct bin_attribute bin_attr_rw_nvmem = { + .attr = { + .name = "nvmem", + .mode = 0644, + }, + .read = bin_attr_nvmem_read, + .write = bin_attr_nvmem_write, +}; + +static struct bin_attribute *nvmem_bin_rw_attributes[] = { + &bin_attr_rw_nvmem, + NULL, +}; + +static const struct attribute_group nvmem_bin_rw_group = { + .bin_attrs = nvmem_bin_rw_attributes, +}; + +static const struct attribute_group *nvmem_rw_dev_groups[] = { + &nvmem_bin_rw_group, + NULL, +}; + +/* read only permission */ +static struct bin_attribute bin_attr_ro_nvmem = { + .attr 
= { + .name = "nvmem", + .mode = 0444, + }, + .read = bin_attr_nvmem_read, +}; + +static struct bin_attribute *nvmem_bin_ro_attributes[] = { + &bin_attr_ro_nvmem, + NULL, +}; + +static const struct attribute_group nvmem_bin_ro_group = { + .bin_attrs = nvmem_bin_ro_attributes, +}; + +static const struct attribute_group *nvmem_ro_dev_groups[] = { + &nvmem_bin_ro_group, + NULL, +}; + +/* default read/write permissions, root only */ +static struct bin_attribute bin_attr_rw_root_nvmem = { + .attr = { + .name = "nvmem", + .mode = 0600, + }, + .read = bin_attr_nvmem_read, + .write = bin_attr_nvmem_write, +}; + +static struct bin_attribute *nvmem_bin_rw_root_attributes[] = { + &bin_attr_rw_root_nvmem, + NULL, +}; + +static const struct attribute_group nvmem_bin_rw_root_group = { + .bin_attrs = nvmem_bin_rw_root_attributes, +}; + +static const struct attribute_group *nvmem_rw_root_dev_groups[] = { + &nvmem_bin_rw_root_group, + NULL, +}; + +/* read only permission, root only */ +static struct bin_attribute bin_attr_ro_root_nvmem = { + .attr = { + .name = "nvmem", + .mode = 0400, + }, + .read = bin_attr_nvmem_read, +}; + +static struct bin_attribute *nvmem_bin_ro_root_attributes[] = { + &bin_attr_ro_root_nvmem, + NULL, +}; + +static const struct attribute_group nvmem_bin_ro_root_group = { + .bin_attrs = nvmem_bin_ro_root_attributes, +}; + +static const struct attribute_group *nvmem_ro_root_dev_groups[] = { + &nvmem_bin_ro_root_group, + NULL, +}; + +const struct attribute_group **nvmem_sysfs_get_groups( + struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ + if (config->root_only) + return nvmem->read_only ? + nvmem_ro_root_dev_groups : + nvmem_rw_root_dev_groups; + + return nvmem->read_only ? nvmem_ro_dev_groups : nvmem_rw_dev_groups; +} + +/* + * nvmem_setup_compat() - Create an additional binary entry in + * drivers sys directory, to be backwards compatible with the older + * drivers/misc/eeprom drivers. 
+ */ +int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ + int rval; + + if (!config->compat) + return 0; + + if (!config->base_dev) + return -EINVAL; + + if (nvmem->read_only) + nvmem->eeprom = bin_attr_ro_root_nvmem; + else + nvmem->eeprom = bin_attr_rw_root_nvmem; + nvmem->eeprom.attr.name = "eeprom"; + nvmem->eeprom.size = nvmem->size; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + nvmem->eeprom.attr.key = &eeprom_lock_key; +#endif + nvmem->eeprom.private = &nvmem->dev; + nvmem->base_dev = config->base_dev; + + rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom); + if (rval) { + dev_err(&nvmem->dev, + "Failed to create eeprom binary file %d\n", rval); + return rval; + } + + nvmem->flags |= FLAG_COMPAT; + + return 0; +} + +void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ + if (config->compat) + device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); +} diff --git a/drivers/nvmem/nvmem.h b/drivers/nvmem/nvmem.h new file mode 100644 index 0000000000000000000000000000000000000000..778f3107612173e7c19a75bbeac673da7e5111de --- /dev/null +++ b/drivers/nvmem/nvmem.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DRIVERS_NVMEM_H +#define _DRIVERS_NVMEM_H + +#include +#include +#include +#include +#include +#include + +struct nvmem_device { + const char *name; + struct module *owner; + struct device dev; + int stride; + int word_size; + int id; + struct kref refcnt; + size_t size; + bool read_only; + int flags; + struct bin_attribute eeprom; + struct device *base_dev; + struct list_head cells; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + void *priv; +}; + +#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) +#define FLAG_COMPAT BIT(0) + +#ifdef CONFIG_NVMEM_SYSFS +const struct attribute_group **nvmem_sysfs_get_groups( + struct nvmem_device *nvmem, + const struct nvmem_config *config); +int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config); +void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config); +#else +static inline const struct attribute_group **nvmem_sysfs_get_groups( + struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ + return NULL; +} + +static inline int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ + return -ENOSYS; +} +static inline void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem, + const struct nvmem_config *config) +{ +} +#endif /* CONFIG_NVMEM_SYSFS */ + +#endif /* _DRIVERS_NVMEM_H */ diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 85b0055a5ae4801c2f09c6e6bd113722696f51bf..39d049775158892db3a2c3b06dd1437eb4a96399 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -80,6 +80,71 @@ void of_fdt_limit_memory(int limit) } } +/** + * of_fdt_get_ddrhbb - Return the highest bank bit of ddr on the current device + * + * On match, returns a non-zero positive value which matches the highest bank + * bit. + * Otherwise returns -ENOENT. 
+ */ +int of_fdt_get_ddrhbb(int channel, int rank) +{ + int memory; + int len; + int ret; + /* Single spaces reserved for channel(0-9), rank(0-9) */ + char pname[] = "ddr_device_hbb_ch _rank "; + fdt32_t *prop = NULL; + + memory = fdt_path_offset(initial_boot_params, "/memory"); + if (memory > 0) { + snprintf(pname, sizeof(pname), + "ddr_device_hbb_ch%d_rank%d", channel, rank); + prop = fdt_getprop_w(initial_boot_params, memory, + pname, &len); + } + + if (!prop || len != sizeof(u32)) + return -ENOENT; + + ret = fdt32_to_cpu(*prop); + + return ret; +} +EXPORT_SYMBOL_GPL(of_fdt_get_ddrhbb); + +/** + * of_fdt_get_ddrrank - Return the rank of ddr on the current device + * + * On match, returns a non-zero positive value which matches the ddr rank. + * Otherwise returns -ENOENT. + */ +int of_fdt_get_ddrrank(int channel) +{ + int memory; + int len; + int ret; + /* Single space reserved for channel(0-9) */ + char pname[] = "ddr_device_rank_ch "; + fdt32_t *prop = NULL; + + memory = fdt_path_offset(initial_boot_params, "/memory"); + if (memory > 0) { + snprintf(pname, sizeof(pname), + "ddr_device_rank_ch%d", channel); + prop = fdt_getprop_w(initial_boot_params, memory, + pname, &len); + } + + if (!prop || len != sizeof(u32)) + return -ENOENT; + + ret = fdt32_to_cpu(*prop); + + return ret; +} +EXPORT_SYMBOL_GPL(of_fdt_get_ddrrank); + /** * of_fdt_get_ddrtype - Return the type of ddr (4/5) on the current device * diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 514528b3566ffcb787a5e790be0c122844fa029f..a77bfeac867db8699aa945571a33e6cb7324a2ca 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -261,6 +261,8 @@ static struct property *dup_and_fixup_symbol_prop( of_property_set_flag(new_prop, OF_DYNAMIC); + kfree(target_path); + return new_prop; err_free_new_prop: diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 6e9e2755b9ea804c4c742a7ec599c51a0c00f00b..8b339f03a0ad483677d48acdf4f8116b7631ced0 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -20,6 +20,7 @@ #include #include #include +#include #include const struct of_device_id of_default_bus_match_table[] = { @@ -191,6 +192,7 @@ static struct platform_device *of_platform_device_create_pdata( dev->dev.bus = &platform_bus_type; dev->dev.platform_data = platform_data; of_msi_configure(&dev->dev, dev->dev.of_node); + of_reserved_mem_device_init_by_idx(&dev->dev, dev->dev.of_node, 0); if (of_device_add(dev) != 0) { platform_device_put(dev); diff --git a/drivers/of/property.c b/drivers/of/property.c index 43cb50c13b53873d96d298b8579024b1b26e3146..6bc0f61596b4e1bac879391099e45f089e5ee3a2 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -1222,6 +1222,8 @@ DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL) DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells") DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells") DEFINE_SIMPLE_PROP(hwlocks, "hwlocks", "#hwlock-cells") +DEFINE_SIMPLE_PROP(extcon, "extcon", NULL) +DEFINE_SIMPLE_PROP(phys, "phys", "#phy-cells") DEFINE_SIMPLE_PROP(pinctrl0, "pinctrl-0", NULL) DEFINE_SIMPLE_PROP(pinctrl1, "pinctrl-1", NULL) DEFINE_SIMPLE_PROP(pinctrl2, "pinctrl-2", NULL) @@ -1253,6 +1255,8 @@ static const struct supplier_bindings of_supplier_bindings[] = { { .parse_prop = parse_dmas, }, { .parse_prop = parse_power_domains, }, { .parse_prop = parse_hwlocks, }, + { .parse_prop = parse_extcon, }, + { .parse_prop = parse_phys, }, { .parse_prop = parse_pinctrl0, }, { .parse_prop = parse_pinctrl1, }, { .parse_prop = parse_pinctrl2, }, diff 
--git a/drivers/of/unittest.c b/drivers/of/unittest.c index 808571f7f6ef9b97d44280878bd67acd411584ef..29f17c3449aa44a62c5af58562bd7075a3ef777d 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -772,6 +772,10 @@ static void __init of_unittest_changeset(void) unittest(!of_changeset_revert(&chgset), "revert failed\n"); of_changeset_destroy(&chgset); + + of_node_put(n1); + of_node_put(n2); + of_node_put(n21); #endif } @@ -1055,10 +1059,13 @@ static void __init of_unittest_platform_populate(void) of_platform_populate(np, match, NULL, &test_bus->dev); for_each_child_of_node(np, child) { - for_each_child_of_node(child, grandchild) - unittest(of_find_device_by_node(grandchild), + for_each_child_of_node(child, grandchild) { + pdev = of_find_device_by_node(grandchild); + unittest(pdev, "Could not create device for node '%pOFn'\n", grandchild); + of_dev_put(pdev); + } } of_platform_depopulate(&test_bus->dev); @@ -2441,8 +2448,11 @@ static __init void of_unittest_overlay_high_level(void) goto err_unlock; } if (__of_add_property(of_symbols, new_prop)) { + kfree(new_prop->name); + kfree(new_prop->value); + kfree(new_prop); /* "name" auto-generated by unflatten */ - if (!strcmp(new_prop->name, "name")) + if (!strcmp(prop->name, "name")) continue; unittest(0, "duplicate property '%s' in overlay_base node __symbols__", prop->name); diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c index 2bf8bd1f0563eaf4740874ec1e3f0c836adc5687..0471643cf536d1a383c0582b2d84de07ab5949e7 100644 --- a/drivers/pci/endpoint/pci-epc-mem.c +++ b/drivers/pci/endpoint/pci-epc-mem.c @@ -79,6 +79,7 @@ int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size, mem->page_size = page_size; mem->pages = pages; mem->size = size; + mutex_init(&mem->lock); epc->mem = mem; @@ -122,7 +123,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, phys_addr_t *phys_addr, size_t size) { int pageno; - void __iomem *virt_addr; + void __iomem *virt_addr = NULL; struct pci_epc_mem *mem = epc->mem; unsigned int page_shift = ilog2(mem->page_size); int order; @@ -130,15 +131,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, size = ALIGN(size, mem->page_size); order = pci_epc_mem_get_order(mem, size); + mutex_lock(&mem->lock); pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order); if (pageno < 0) - return NULL; + goto ret; *phys_addr = mem->phys_base + (pageno << page_shift); virt_addr = ioremap(*phys_addr, size); if (!virt_addr) bitmap_release_region(mem->bitmap, pageno, order); +ret: + mutex_unlock(&mem->lock); return virt_addr; } EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr); @@ -164,7 +168,9 @@ void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, pageno = (phys_addr - mem->phys_base) >> page_shift; size = ALIGN(size, mem->page_size); order = pci_epc_mem_get_order(mem, size); + mutex_lock(&mem->lock); bitmap_release_region(mem->bitmap, pageno, order); + mutex_unlock(&mem->lock); } EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index c3e3f5358b3ba123127f23816d7aa51686f095ed..07940d1d83b70bdbdbb382b6f13a03cb58dde30e 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -627,17 +627,15 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) { ret = pciehp_isr(irq, dev_id); enable_irq(irq); - if (ret != IRQ_WAKE_THREAD) { - pci_config_pm_runtime_put(pdev); - return 
ret; - } + if (ret != IRQ_WAKE_THREAD) + goto out; } synchronize_hardirq(irq); events = atomic_xchg(&ctrl->pending_events, 0); if (!events) { - pci_config_pm_runtime_put(pdev); - return IRQ_NONE; + ret = IRQ_NONE; + goto out; } /* Check Attention Button Pressed */ @@ -666,10 +664,12 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) pciehp_handle_presence_or_link_change(slot, events); up_read(&ctrl->reset_lock); + ret = IRQ_HANDLED; +out: pci_config_pm_runtime_put(pdev); ctrl->ist_running = false; wake_up(&ctrl->requester); - return IRQ_HANDLED; + return ret; } static int pciehp_poll(void *data) diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index e9ede82ee2c259b52ee60fbd59ff92214e24e689..6109afcdc54981ca0abdcf56b73b1bb55bb16d88 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -2,6 +2,8 @@ #ifndef DRIVERS_PCI_H #define DRIVERS_PCI_H +#include + #define PCI_FIND_CAP_TTL 48 #define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */ @@ -293,6 +295,11 @@ struct pci_sriov { u16 subsystem_device; /* VF subsystem device */ resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ bool drivers_autoprobe; /* Auto probing of VFs by driver */ + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /* pci_dev priv_flags */ diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 1117b25fbe0bb9bedec38a5d844b113afbabe4e8..db2efa219028cd11d7e11dbc4b6ea0740ed758af 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -67,6 +67,7 @@ struct pcie_link_state { u32 clkpm_capable:1; /* Clock PM capable? */ u32 clkpm_enabled:1; /* Current Clock PM state */ u32 clkpm_default:1; /* Default Clock PM state by BIOS */ + u32 clkpm_disable:1; /* Clock PM disabled */ /* Exit latencies */ struct aspm_latency latency_up; /* Upstream direction exit latency */ @@ -164,8 +165,11 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable) static void pcie_set_clkpm(struct pcie_link_state *link, int enable) { - /* Don't enable Clock PM if the link is not Clock PM capable */ - if (!link->clkpm_capable) + /* + * Don't enable Clock PM if the link is not Clock PM capable + * or Clock PM is disabled + */ + if (!link->clkpm_capable || link->clkpm_disable) enable = 0; /* Need nothing if the specified equals to current state */ if (link->clkpm_enabled == enable) @@ -195,7 +199,8 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) } link->clkpm_enabled = enabled; link->clkpm_default = enabled; - link->clkpm_capable = (blacklist) ? 0 : capable; + link->clkpm_capable = capable; + link->clkpm_disable = blacklist ? 
1 : 0; } static bool pcie_retrain_link(struct pcie_link_state *link) @@ -747,9 +752,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) /* Enable what we need to enable */ pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1, - PCI_L1SS_CAP_L1_PM_SS, val); + PCI_L1SS_CTL1_L1SS_MASK, val); pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1, - PCI_L1SS_CAP_L1_PM_SS, val); + PCI_L1SS_CTL1_L1SS_MASK, val); } static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) @@ -1106,10 +1111,9 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) link->aspm_disable |= ASPM_STATE_L1; pcie_config_aspm_link(link, policy_to_aspm_state(link)); - if (state & PCIE_LINK_STATE_CLKPM) { - link->clkpm_capable = 0; - pcie_set_clkpm(link, 0); - } + if (state & PCIE_LINK_STATE_CLKPM) + link->clkpm_disable = 1; + pcie_set_clkpm(link, policy_to_clkpm_state(link)); mutex_unlock(&aspm_lock); if (sem) up_read(&pci_bus_sem); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 419dda6dbd1662ca1840123d6c40c2a991722fd2..ca41cff2e68cd5ced8a0ea762bb4e7e2f8eeb1d3 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1947,26 +1947,92 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk /* * IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no * 300641-004US, section 5.7.3. + * + * Core IO on Xeon E5 1600/2600/4600, see Intel order no 326509-003. + * Core IO on Xeon E5 v2, see Intel order no 329188-003. + * Core IO on Xeon E7 v2, see Intel order no 329595-002. + * Core IO on Xeon E5 v3, see Intel order no 330784-003. + * Core IO on Xeon E7 v3, see Intel order no 332315-001US. + * Core IO on Xeon E5 v4, see Intel order no 333810-002US. + * Core IO on Xeon E7 v4, see Intel order no 332315-001US. + * Core IO on Xeon D-1500, see Intel order no 332051-001. + * Core IO on Xeon Scalable, see Intel order no 610950. 
*/ -#define INTEL_6300_IOAPIC_ABAR 0x40 +#define INTEL_6300_IOAPIC_ABAR 0x40 /* Bus 0, Dev 29, Func 5 */ #define INTEL_6300_DISABLE_BOOT_IRQ (1<<14) +#define INTEL_CIPINTRC_CFG_OFFSET 0x14C /* Bus 0, Dev 5, Func 0 */ +#define INTEL_CIPINTRC_DIS_INTX_ICH (1<<25) + static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev) { u16 pci_config_word; + u32 pci_config_dword; if (noioapicquirk) return; - pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word); - pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; - pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); - + switch (dev->device) { + case PCI_DEVICE_ID_INTEL_ESB_10: + pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, + &pci_config_word); + pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; + pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, + pci_config_word); + break; + case 0x3c28: /* Xeon E5 1600/2600/4600 */ + case 0x0e28: /* Xeon E5/E7 V2 */ + case 0x2f28: /* Xeon E5/E7 V3,V4 */ + case 0x6f28: /* Xeon D-1500 */ + case 0x2034: /* Xeon Scalable Family */ + pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET, + &pci_config_dword); + pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH; + pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET, + pci_config_dword); + break; + default: + return; + } pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); +/* + * Device 29 Func 5 Device IDs of IO-APIC + * containing ABAR—APIC1 Alternate Base Address Register + */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, + quirk_disable_intel_boot_interrupt); + +/* + * Device 5 Func 0 Device IDs of Core IO modules/hubs + * containing Coherent Interface Protocol Interrupt Control + * + * Device IDs obtained from volume 2 datasheets of commented + * families above. + */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x3c28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0e28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2f28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x6f28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2034, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x3c28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x0e28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2f28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x6f28, + quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2034, + quirk_disable_intel_boot_interrupt); /* Disable boot interrupts on HT-1000 */ #define BC_HT1000_FEATURE_REG 0x64 @@ -5216,3 +5282,21 @@ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev) DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1, PCI_CLASS_DISPLAY_VGA, 8, quirk_reset_lenovo_thinkpad_p50_nvgpu); + +/* + * Device [1b21:2142] + * When in D0, PME# doesn't get asserted when plugging USB 3.0 device. 
+ */ +static void pci_fixup_no_d0_pme(struct pci_dev *dev) +{ + pci_info(dev, "PME# does not work under D0, disabling it\n"); + dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme); + +static void apex_pci_fixup_class(struct pci_dev *pdev) +{ + pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class; +} +DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a, + PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class); diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 43431816412c1116859ebd2f29574e1d191bf477..291c0074ad6f465d7aaa8d2f517aa7113dc0187b 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -147,7 +147,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser) kref_get(&stuser->kref); stuser->read_len = sizeof(stuser->data); stuser_set_state(stuser, MRPC_QUEUED); - init_completion(&stuser->comp); + reinit_completion(&stuser->comp); list_add_tail(&stuser->list, &stdev->mrpc_queue); mrpc_cmd_submit(stdev); diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c index b471b86c28fe8d2e15aca3766de871e2dfececad..5b516e4c2bfbe2ac53aa68a7bbf99365d1c391a4 100644 --- a/drivers/platform/x86/gpd-pocket-fan.c +++ b/drivers/platform/x86/gpd-pocket-fan.c @@ -128,7 +128,7 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(temp_limits); i++) { if (temp_limits[i] < 20000 || temp_limits[i] > 90000) { - dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 40000 and 70000)\n", + dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 20000 and 90000)\n", temp_limits[i]); temp_limits[0] = TEMP_LIMIT0_DEFAULT; temp_limits[1] = TEMP_LIMIT1_DEFAULT; diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index c60659fb21deca7f2fd0c53c41e22e23802d2bb0..46eb7716c35c86473c81f9c41dfb4faa2a6ceb0a 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -28,6 +28,7 @@ #include #include #include +#include #define PS_STAT_VBUS_TRIGGER (1 << 0) #define PS_STAT_BAT_CHRG_DIR (1 << 2) @@ -552,6 +553,49 @@ static irqreturn_t axp288_charger_irq_thread_handler(int irq, void *dev) return IRQ_HANDLED; } +/* + * The HP Pavilion x2 10 series comes in a number of variants: + * Bay Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "815D" + * Cherry Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "813E" + * Cherry Trail SoC + TI PMIC, DMI_BOARD_NAME: "827C" or "82F4" + * + * The variants with the AXP288 PMIC are all kinds of special: + * + * 1. All variants use a Type-C connector which the AXP288 does not support, so + * when using a Type-C charger it is not recognized. Unlike most AXP288 devices, + * this model actually has mostly working ACPI AC / Battery code, the ACPI code + * "solves" this by simply setting the input_current_limit to 3A. + * There are still some issues with the ACPI code, so we use this native driver, + * and to solve the charging not working (500mA is not enough) issue we hardcode + * the 3A input_current_limit like the ACPI code does. + * + * 2. If no charger is connected the machine boots with the vbus-path disabled. + * Normally this is done when a 5V boost converter is active to avoid the PMIC + * trying to charge from the 5V boost converter's output. 
This is done when + * an OTG host cable is inserted and the ID pin on the micro-B receptacle is + * pulled low and the ID pin has an ACPI event handler associated with it + * which re-enables the vbus-path when the ID pin is pulled high when the + * OTG host cable is removed. The Type-C connector has no ID pin, there is + * no ID pin handler and there appears to be no 5V boost converter, so we + * end up not charging because the vbus-path is disabled, until we unplug + * the charger which automatically clears the vbus-path disable bit and then + * on the second plug-in of the adapter we start charging. To solve the not + * charging on first charger plugin we unconditionally enable the vbus-path at + * probe on this model, which is safe since there is no 5V boost converter. + */ +static const struct dmi_system_id axp288_hp_x2_dmi_ids[] = { + { + /* + * Bay Trail model has "Hewlett-Packard" as sys_vendor, Cherry + * Trail model has "HP", so we only match on product_name. + */ + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), + }, + }, + {} /* Terminating entry */ +}; + static void axp288_charger_extcon_evt_worker(struct work_struct *work) { struct axp288_chrg_info *info = @@ -575,7 +619,11 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work) } /* Determine cable/charger type */ - if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) { + if (dmi_check_system(axp288_hp_x2_dmi_ids)) { + /* See comment above axp288_hp_x2_dmi_ids declaration */ + dev_dbg(&info->pdev->dev, "HP X2 with Type-C, setting inlmt to 3A\n"); + current_limit = 3000000; + } else if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) { dev_dbg(&info->pdev->dev, "USB SDP charger is connected\n"); current_limit = 500000; } else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) { @@ -692,6 +740,13 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info) return ret; } + if (dmi_check_system(axp288_hp_x2_dmi_ids)) { + /* See comment above axp288_hp_x2_dmi_ids declaration */ + ret = axp288_charger_vbus_path_select(info, true); + if (ret < 0) + return ret; + } + /* Read current charge voltage and current limit */ ret = regmap_read(info->regmap, AXP20X_CHRG_CTRL1, &val); if (ret < 0) { diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c index ab0b6e78ca02a6811ee2a5dcd7fd1fa5b392908c..157cf5ec6b02301ffe1a1e8bf912942484019559 100644 --- a/drivers/power/supply/axp288_fuel_gauge.c +++ b/drivers/power/supply/axp288_fuel_gauge.c @@ -718,14 +718,14 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = { { /* Intel Cherry Trail Compute Stick, Windows version */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_SYS_VENDOR, "Intel"), DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"), }, }, { /* Intel Cherry Trail Compute Stick, version without an OS */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_SYS_VENDOR, "Intel"), DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"), }, }, diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index f022e1b550dfe6f736b8b226516047c1786e1cea..ff02a917556a9ef6288cc4429befdbfb1ff4ea35 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1887,7 +1887,10 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di) di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg); if (IS_ERR(di->bat)) { - dev_err(di->dev, "failed to register battery\n"); + if 
(PTR_ERR(di->bat) == -EPROBE_DEFER) + dev_dbg(di->dev, "failed to register battery, deferring probe\n"); + else + dev_err(di->dev, "failed to register battery\n"); return PTR_ERR(di->bat); } diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 917cc282637f8fdb97c6c6731bfcab8f1fc1a55a..2894700c348652303c16b1cb3ed44e6726a5e67b 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -65,8 +65,7 @@ static const char * const power_supply_charge_type_text[] = { static const char * const power_supply_health_text[] = { "Unknown", "Good", "Overheat", "Dead", "Over voltage", "Unspecified failure", "Cold", "Watchdog timer expire", - "Safety timer expire", - "Warm", "Cool", "Hot" + "Safety timer expire", "Over current", "Warm", "Cool", "Hot" }; static const char * const power_supply_technology_text[] = { @@ -506,6 +505,8 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(irq_status), POWER_SUPPLY_ATTR(parallel_output_mode), POWER_SUPPLY_ATTR(alignment), + POWER_SUPPLY_ATTR(moisture_detection_enabled), + POWER_SUPPLY_ATTR(fg_type), /* Local extensions of type int64_t */ POWER_SUPPLY_ATTR(charge_counter_ext), POWER_SUPPLY_ATTR(charge_charger_state), diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c index db001cba937fd8617fc81720e21c30601f2bfec5..e340ad79a1ec95c060af20691832a346c1b49e26 100644 --- a/drivers/pwm/pwm-bcm2835.c +++ b/drivers/pwm/pwm-bcm2835.c @@ -166,6 +166,7 @@ static int bcm2835_pwm_probe(struct platform_device *pdev) pc->chip.dev = &pdev->dev; pc->chip.ops = &bcm2835_pwm_ops; + pc->chip.base = -1; pc->chip.npwm = 2; pc->chip.of_xlate = of_pwm_xlate_with_flags; pc->chip.of_pwm_n_cells = 3; diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c index e1e5dfcb16f367d7fbc0a72cd22113bf9a731f21..259fd58812aed13f293c52105ec415a6f48ca12c 100644 --- a/drivers/pwm/pwm-pca9685.c +++ b/drivers/pwm/pwm-pca9685.c @@ -31,6 +31,7 @@ #include #include #include +#include /* * Because the PCA9685 has only one prescaler per chip, changing the period of @@ -85,6 +86,7 @@ struct pca9685 { #if IS_ENABLED(CONFIG_GPIOLIB) struct mutex lock; struct gpio_chip gpio; + DECLARE_BITMAP(pwms_inuse, PCA9685_MAXCHAN + 1); #endif }; @@ -94,51 +96,51 @@ static inline struct pca9685 *to_pca(struct pwm_chip *chip) } #if IS_ENABLED(CONFIG_GPIOLIB) -static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset) +static bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, int pwm_idx) { - struct pca9685 *pca = gpiochip_get_data(gpio); - struct pwm_device *pwm; + bool is_inuse; mutex_lock(&pca->lock); - - pwm = &pca->chip.pwms[offset]; - - if (pwm->flags & (PWMF_REQUESTED | PWMF_EXPORTED)) { - mutex_unlock(&pca->lock); - return -EBUSY; + if (pwm_idx >= PCA9685_MAXCHAN) { + /* + * "all LEDs" channel: + * pretend already in use if any of the PWMs are requested + */ + if (!bitmap_empty(pca->pwms_inuse, PCA9685_MAXCHAN)) { + is_inuse = true; + goto out; + } + } else { + /* + * regular channel: + * pretend already in use if the "all LEDs" channel is requested + */ + if (test_bit(PCA9685_MAXCHAN, pca->pwms_inuse)) { + is_inuse = true; + goto out; + } } - - pwm_set_chip_data(pwm, (void *)1); - + is_inuse = test_and_set_bit(pwm_idx, pca->pwms_inuse); +out: mutex_unlock(&pca->lock); - pm_runtime_get_sync(pca->chip.dev); - return 0; + return is_inuse; } -static bool pca9685_pwm_is_gpio(struct pca9685 *pca, struct pwm_device *pwm) +static void 
pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx) { - bool is_gpio = false; - mutex_lock(&pca->lock); + clear_bit(pwm_idx, pca->pwms_inuse); + mutex_unlock(&pca->lock); +} - if (pwm->hwpwm >= PCA9685_MAXCHAN) { - unsigned int i; - - /* - * Check if any of the GPIOs are requested and in that case - * prevent using the "all LEDs" channel. - */ - for (i = 0; i < pca->gpio.ngpio; i++) - if (gpiochip_is_requested(&pca->gpio, i)) { - is_gpio = true; - break; - } - } else if (pwm_get_chip_data(pwm)) { - is_gpio = true; - } +static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset) +{ + struct pca9685 *pca = gpiochip_get_data(gpio); - mutex_unlock(&pca->lock); - return is_gpio; + if (pca9685_pwm_test_and_set_inuse(pca, offset)) + return -EBUSY; + pm_runtime_get_sync(pca->chip.dev); + return 0; } static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset) @@ -173,6 +175,7 @@ static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset) pca9685_pwm_gpio_set(gpio, offset, 0); pm_runtime_put(pca->chip.dev); + pca9685_pwm_clear_inuse(pca, offset); } static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip, @@ -224,12 +227,17 @@ static int pca9685_pwm_gpio_probe(struct pca9685 *pca) return devm_gpiochip_add_data(dev, &pca->gpio, pca); } #else -static inline bool pca9685_pwm_is_gpio(struct pca9685 *pca, - struct pwm_device *pwm) +static inline bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, + int pwm_idx) { return false; } +static inline void +pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx) +{ +} + static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca) { return 0; @@ -413,7 +421,7 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) { struct pca9685 *pca = to_pca(chip); - if (pca9685_pwm_is_gpio(pca, pwm)) + if (pca9685_pwm_test_and_set_inuse(pca, pwm->hwpwm)) return -EBUSY; pm_runtime_get_sync(chip->dev); @@ -422,8 +430,11 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { + struct pca9685 *pca = to_pca(chip); + pca9685_pwm_disable(chip, pwm); pm_runtime_put(chip->dev); + pca9685_pwm_clear_inuse(pca, pwm->hwpwm); } static const struct pwm_ops pca9685_pwm_ops = { diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c index 748f614d53755daabdd9f1529d7ba9cd602cb611..b7d71bf297d6947c5931376016403632ee5a02c4 100644 --- a/drivers/pwm/pwm-rcar.c +++ b/drivers/pwm/pwm-rcar.c @@ -232,24 +232,28 @@ static int rcar_pwm_probe(struct platform_device *pdev) rcar_pwm->chip.base = -1; rcar_pwm->chip.npwm = 1; + pm_runtime_enable(&pdev->dev); + ret = pwmchip_add(&rcar_pwm->chip); if (ret < 0) { dev_err(&pdev->dev, "failed to register PWM chip: %d\n", ret); + pm_runtime_disable(&pdev->dev); return ret; } - pm_runtime_enable(&pdev->dev); - return 0; } static int rcar_pwm_remove(struct platform_device *pdev) { struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev); + int ret; + + ret = pwmchip_remove(&rcar_pwm->chip); pm_runtime_disable(&pdev->dev); - return pwmchip_remove(&rcar_pwm->chip); + return ret; } static const struct of_device_id rcar_pwm_of_table[] = { diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c index 29267d12fb4c9d3cf9283c38b114b00430704b09..9c7962f2f0aa4c058f497c58e9107e02e18168cf 100644 --- a/drivers/pwm/pwm-renesas-tpu.c +++ b/drivers/pwm/pwm-renesas-tpu.c @@ -423,16 +423,17 @@ static int tpu_probe(struct platform_device *pdev) 
tpu->chip.base = -1; tpu->chip.npwm = TPU_CHANNEL_MAX; + pm_runtime_enable(&pdev->dev); + ret = pwmchip_add(&tpu->chip); if (ret < 0) { dev_err(&pdev->dev, "failed to register PWM chip\n"); + pm_runtime_disable(&pdev->dev); return ret; } dev_info(&pdev->dev, "TPU PWM %d registered\n", tpu->pdev->id); - pm_runtime_enable(&pdev->dev); - return 0; } @@ -442,12 +443,10 @@ static int tpu_remove(struct platform_device *pdev) int ret; ret = pwmchip_remove(&tpu->chip); - if (ret) - return ret; pm_runtime_disable(&pdev->dev); - return 0; + return ret; } #ifdef CONFIG_OF diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index abbef17c97ee2a9ec44038ea94d3cf80e0febf7b..d5ff272fde34390ae3c26f716aac500c6fb3fe5a 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -289,7 +289,7 @@ void rproc_free_vring(struct rproc_vring *rvring) { int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align)); struct rproc *rproc = rvring->rvdev->rproc; - int idx = rvring->rvdev->vring - rvring; + int idx = rvring - rvring->rvdev->vring; struct fw_rsc_vdev *rsc; dma_free_coherent(rproc->dev.parent, size, rvring->va, rvring->dma); diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 25c394a7077b8a729a8d6c669acd51d65c26448e..facc577ab0acc19b1c99ff91655a6710222a7567 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -813,9 +813,6 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail) return -EAGAIN; } - if (WARN(chunk_size % 4, "Incoming data must be word aligned\n")) - return -EINVAL; - rcid = le16_to_cpu(hdr.msg.param1); spin_lock_irqsave(&glink->idr_lock, flags); channel = idr_find(&glink->rcids, rcid); diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c index 73697e4b18a9d4d9d85546a1d807a861966df8d8..9d4a59aa29a1a20b54820080ef4659749cf5a6fc 100644 --- a/drivers/rtc/rtc-88pm860x.c +++ b/drivers/rtc/rtc-88pm860x.c @@ -341,6 +341,10 @@ static int pm860x_rtc_probe(struct platform_device *pdev) info->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, info); + info->rtc_dev = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(info->rtc_dev)) + return PTR_ERR(info->rtc_dev); + ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL, rtc_update_handler, IRQF_ONESHOT, "rtc", info); @@ -382,13 +386,11 @@ static int pm860x_rtc_probe(struct platform_device *pdev) } } - info->rtc_dev = devm_rtc_device_register(&pdev->dev, "88pm860x-rtc", - &pm860x_rtc_ops, THIS_MODULE); - ret = PTR_ERR(info->rtc_dev); - if (IS_ERR(info->rtc_dev)) { - dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); + info->rtc_dev->ops = &pm860x_rtc_ops; + + ret = rtc_register_device(info->rtc_dev); + if (ret) return ret; - } /* * enable internal XO instead of internal 3.25MHz clock since it can diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index 323ff55cc16564833d75ed4b0fa682642ad4fc0f..080211dd916a654abcf8bb277ae887d7309f2e5a 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -561,9 +561,7 @@ static const struct pinctrl_ops rtc_pinctrl_ops = { .dt_free_map = pinconf_generic_dt_free_map, }; -enum rtc_pin_config_param { - PIN_CONFIG_ACTIVE_HIGH = PIN_CONFIG_END + 1, -}; +#define PIN_CONFIG_ACTIVE_HIGH (PIN_CONFIG_END + 1) static const struct pinconf_generic_params rtc_params[] = { {"ti,active-high", PIN_CONFIG_ACTIVE_HIGH, 0}, diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 
1540229a37bbab129e39b8ef0ad943be237b86b8..c9bc9a6bd73b7a5b237fdaeb79afe27a3bbd8dc3 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -827,8 +827,10 @@ static void io_subchannel_register(struct ccw_device *cdev) * Now we know this subchannel will stay, we can throw * our delayed uevent. */ - dev_set_uevent_suppress(&sch->dev, 0); - kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + if (dev_get_uevent_suppress(&sch->dev)) { + dev_set_uevent_suppress(&sch->dev, 0); + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } /* make it known to the system */ ret = ccw_device_add(cdev); if (ret) { @@ -1036,8 +1038,11 @@ static int io_subchannel_probe(struct subchannel *sch) * Throw the delayed uevent for the subchannel, register * the ccw_device and exit. */ - dev_set_uevent_suppress(&sch->dev, 0); - kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + if (dev_get_uevent_suppress(&sch->dev)) { + /* should always be the case for the console */ + dev_set_uevent_suppress(&sch->dev, 0); + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } cdev = sch_get_cdev(sch); rc = ccw_device_add(cdev); if (rc) { diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index f602b42b8343d3fcc44d199c8864a4c11cffc97d..7522aa06672dd025ff04c71fdff2f0e44eefbe11 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -738,7 +738,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter) adapter->peer_d_id); if (IS_ERR(port)) /* error or port already attached */ return; - _zfcp_erp_port_reopen(port, 0, "ereptp1"); + zfcp_erp_port_reopen(port, 0, "ereptp1"); } static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 6c355d87c709d19074e850f2c6eaad0d4bc6298d..e3013858937b7aac2437ff19c2e8e0142bbdde5a 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -342,13 +342,15 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) { ndlp->nrport = NULL; ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG; - } - spin_unlock_irq(&vport->phba->hbalock); + spin_unlock_irq(&vport->phba->hbalock); - /* Remove original register reference. The host transport - * won't reference this rport/remoteport any further. - */ - lpfc_nlp_put(ndlp); + /* Remove original register reference. The host transport + * won't reference this rport/remoteport any further. + */ + lpfc_nlp_put(ndlp); + } else { + spin_unlock_irq(&vport->phba->hbalock); + } rport_err: return; @@ -1903,8 +1905,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, /* Declare and initialization an instance of the FC NVME template. 
*/ static struct nvme_fc_port_template lpfc_nvme_template = { - .module = THIS_MODULE, - /* initiator-based functions */ .localport_delete = lpfc_nvme_localport_delete, .remoteport_delete = lpfc_nvme_remoteport_delete, diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index a801917d3c193c98f426972d12059b2c3eb75917..a56a939792ac1ff621e396802ffce745ca15e8c6 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -2472,6 +2472,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) !pmb->u.mb.mbxStatus) { rpi = pmb->u.mb.un.varWords[0]; vpi = pmb->u.mb.un.varRegLogin.vpi; + if (phba->sli_rev == LPFC_SLI_REV4) + vpi -= phba->sli4_hba.max_cfg_param.vpi_base; lpfc_unreg_login(phba, vpi, rpi, pmb); pmb->vport = vport; pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index d3c944d997039849f2b90618a66a771f9c5cebc3..5a5e5c3da657180aa65edc2efe322159e921b7aa 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -9841,8 +9841,8 @@ static void scsih_remove(struct pci_dev *pdev) ioc->remove_host = 1; - mpt3sas_wait_for_commands_to_complete(ioc); - _scsih_flush_running_cmds(ioc); + if (!pci_device_is_present(pdev)) + _scsih_flush_running_cmds(ioc); _scsih_fw_event_cleanup_queue(ioc); @@ -9919,8 +9919,8 @@ scsih_shutdown(struct pci_dev *pdev) ioc->remove_host = 1; - mpt3sas_wait_for_commands_to_complete(ioc); - _scsih_flush_running_cmds(ioc); + if (!pci_device_is_present(pdev)) + _scsih_flush_running_cmds(ioc); _scsih_fw_event_cleanup_queue(ioc); diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index db367e428095da84334b93e7e5c267003d026e87..5590d6e8b57624404df0cb481ab134f2e3374a5d 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -560,7 +560,6 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) } static struct nvme_fc_port_template qla_nvme_fc_transport = { - .module = THIS_MODULE, .localport_delete = qla_nvme_localport_delete, .remoteport_delete = qla_nvme_remoteport_delete, .create_queue = qla_nvme_alloc_queue, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index fff20a3707677b1c1cd2d3deefaddec971dad0c1..59b8681842be38ef37c93bff49590080446b2ff2 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3654,6 +3654,13 @@ qla2x00_remove_one(struct pci_dev *pdev) } qla2x00_wait_for_hba_ready(base_vha); + /* + * if UNLOADING flag is already set, then continue unload, + * where it was set first. + */ + if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) + return; + if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) { if (ha->flags.fw_started) qla2x00_abort_isp_cleanup(base_vha); @@ -3671,15 +3678,6 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_wait_for_sess_deletion(base_vha); - /* - * if UNLOAD flag is already set, then continue unload, - * where it was set first. 
- */ - if (test_bit(UNLOADING, &base_vha->dpc_flags)) - return; - - set_bit(UNLOADING, &base_vha->dpc_flags); - qla_nvme_delete(base_vha); dma_free_coherent(&ha->pdev->dev, @@ -4647,6 +4645,9 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) struct qla_work_evt *e; uint8_t bail; + if (test_bit(UNLOADING, &vha->dpc_flags)) + return NULL; + QLA_VHA_MARK_BUSY(vha, bail); if (bail) return NULL; @@ -5845,13 +5846,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) struct pci_dev *pdev = ha->pdev; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - /* - * if UNLOAD flag is already set, then continue unload, - * where it was set first. - */ - if (test_bit(UNLOADING, &base_vha->dpc_flags)) - return; - ql_log(ql_log_warn, base_vha, 0x015b, "Disabling adapter.\n"); @@ -5862,9 +5856,14 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) return; } - qla2x00_wait_for_sess_deletion(base_vha); + /* + * if UNLOADING flag is already set, then continue unload, + * where it was set first. + */ + if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) + return; - set_bit(UNLOADING, &base_vha->dpc_flags); + qla2x00_wait_for_sess_deletion(base_vha); qla2x00_delete_all_vps(ha, base_vha); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index c501fb5190a385f7d7c4f808a2812bc1f6edeaac..54e74ca306060292e7b33607786ca1786581f720 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -279,7 +279,11 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, rq->cmd_len = COMMAND_SIZE(cmd[0]); memcpy(rq->cmd, cmd, rq->cmd_len); rq->retries = retries; - req->timeout = timeout; + if (likely(!sdev->timeout_override)) + req->timeout = timeout; + else + req->timeout = sdev->timeout_override; + req->cmd_flags |= flags; req->rq_flags |= rq_flags | RQF_QUIET; @@ -2468,6 +2472,33 @@ void scsi_unblock_requests(struct Scsi_Host *shost) } EXPORT_SYMBOL(scsi_unblock_requests); +/* + * Function: scsi_set_cmd_timeout_override() + * + * Purpose: Utility function used by low-level drivers to override + * timeout for the scsi commands. + * + * Arguments: sdev - scsi device in question + * timeout - timeout in jiffies + * + * Returns: Nothing + * + * Lock status: No locks are assumed held. + * + * Notes: Some platforms might be very slow and command completion may + * take much longer than default scsi command timeouts. + * SCSI Read/Write command timeout can be changed by + * blk_queue_rq_timeout() but there is no option to override + * timeout for the rest of the scsi commands. This function would + * allow this.
+ */ +void scsi_set_cmd_timeout_override(struct scsi_device *sdev, + unsigned int timeout) +{ + sdev->timeout_override = timeout; +} +EXPORT_SYMBOL(scsi_set_cmd_timeout_override); + int __init scsi_init_queue(void) { scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index c0fb9e7890807f34a958308bff2d7ef2a37151bf..04d095488c764726efd5da82bc09ba72457e4318 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2010,7 +2010,7 @@ static void __iscsi_unbind_session(struct work_struct *work) if (session->target_id == ISCSI_MAX_TARGET) { spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); - return; + goto unbind_session_exit; } target_id = session->target_id; @@ -2022,6 +2022,8 @@ static void __iscsi_unbind_session(struct work_struct *work) ida_simple_remove(&iscsi_sess_ida, target_id); scsi_remove_target(&session->dev); + +unbind_session_exit: iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n"); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 90f9ffa917a3c8333b585bc5b3d95ca946d6f11d..f4b222ea4543c42aee25ccc6a5e6e7b85d7ad4b0 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -967,7 +967,10 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) sector >>= ilog2(sdp->sector_size) - 9; nr_sectors >>= ilog2(sdp->sector_size) - 9; - rq->timeout = SD_WRITE_SAME_TIMEOUT; + if (likely(!sdp->timeout_override)) + rq->timeout = SD_WRITE_SAME_TIMEOUT; + else + rq->timeout = sdp->timeout_override; if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) { cmd->cmd_len = 16; @@ -2680,6 +2683,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) { int len = 0, res; struct scsi_device *sdp = sdkp->device; + struct Scsi_Host *host = sdp->host; int dbd; int modepage; @@ -2711,7 +2715,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) dbd = 8; } else { modepage = 8; - dbd = 0; + if (host->set_dbd_for_caching) + dbd = 8; + else + dbd = 0; } /* cautiously ask */ diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index a7d4f50b67d433080eff0f6a7df34e1910683513..32c10b8221da0a1c3857448a61eaf1e078ab3ac2 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -117,6 +117,9 @@ struct scsi_disk { unsigned urswrz : 1; unsigned security : 1; unsigned ignore_medium_access_errors : 1; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 8a254bb46a9b402b83e8e99e4474327c75c52c18..260a4b2a1808eed69f9321ab9d4319ebc0367d7a 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -808,8 +808,10 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); - if (hp->dxfer_len >= SZ_256M) + if (hp->dxfer_len >= SZ_256M) { + sg_remove_request(sfp, srp); return -EINVAL; + } k = sg_start_req(srp, cmnd); if (k) { @@ -838,7 +840,11 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, else at_head = 1; - srp->rq->timeout = timeout; + if (likely(!sdp->device->timeout_override)) + srp->rq->timeout = timeout; + else + srp->rq->timeout = sdp->device->timeout_override; + kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). 
*/ blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk, srp->rq, at_head, sg_rq_end_io); diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c index b209a35e482ef87348a5ea6a7554443742077655..01dfb97b07786b497ca2c619c8fb4216bb177dc4 100644 --- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c +++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c @@ -50,9 +50,9 @@ static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy) struct sas_phy *phy = pqi_sas_phy->phy; sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy); - sas_phy_free(phy); if (pqi_sas_phy->added_to_port) list_del(&pqi_sas_phy->phy_list_entry); + sas_phy_delete(phy); kfree(pqi_sas_phy); } diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c index d62ab7a9faff9effda3e84d22538addfb9c2bf49..43d105bc0e2666d32ee9813a92c8084330da9c78 100644 --- a/drivers/scsi/ufs/ufshcd-crypto.c +++ b/drivers/scsi/ufs/ufshcd-crypto.c @@ -71,7 +71,7 @@ int ufshcd_crypto_cap_find(struct ufs_hba *hba, return -EINVAL; } -EXPORT_SYMBOL(ufshcd_crypto_cap_find); +EXPORT_SYMBOL_GPL(ufshcd_crypto_cap_find); /** * ufshcd_crypto_cfg_entry_write_key - Write a key into a crypto_cfg_entry @@ -344,6 +344,7 @@ int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba, err = -ENOMEM; goto out_free_caps; } + keyslot_manager_set_max_dun_bytes(hba->ksm, sizeof(u64)); return 0; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 1c8804999f7f78dc2424baeadb8615eb3dff6830..49176ca82de44c0ea066a68b7ea8c14968fc98f8 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1579,6 +1579,11 @@ int ufshcd_hold(struct ufs_hba *hba, bool async) */ if (ufshcd_can_hibern8_during_gating(hba) && ufshcd_is_link_hibern8(hba)) { + if (async) { + rc = -EAGAIN; + hba->clk_gating.active_reqs--; + break; + } spin_unlock_irqrestore(hba->host->host_lock, flags); flush_work(&hba->clk_gating.ungate_work); spin_lock_irqsave(hba->host->host_lock, flags); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 71fc93bf5ea6b37c325261d83ed4d02b9701e8d4..be02ecd69c4a3c05655c1cae1dad682c09b93292 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -65,6 +65,7 @@ #include #include #include +#include #include "ufs.h" #include "ufshci.h" @@ -339,6 +340,11 @@ struct ufs_hba_variant_ops { int (*phy_initialization)(struct ufs_hba *); int (*program_key)(struct ufs_hba *hba, const union ufs_crypto_cfg_entry *cfg, int slot); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; struct keyslot_mgmt_ll_ops; @@ -362,6 +368,11 @@ struct ufs_hba_crypto_variant_ops { struct scsi_cmnd *cmd, struct ufshcd_lrb *lrbp); void *priv; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /* clock gating state */ @@ -773,6 +784,11 @@ struct ufs_hba { u32 crypto_cfg_register; struct keyslot_manager *ksm; #endif /* CONFIG_SCSI_UFS_CRYPTO */ + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /* Returns true if clocks can be gated. 
Otherwise false */ diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c index d160fc2a7b7a20a39d9710c2f5ac49a2737b1dda..56c019ec7f14aa6cdab88b42442eae4121fac44c 100644 --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c @@ -93,8 +93,8 @@ static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd) static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd) { struct imx_pm_domain *pd = to_imx_pm_domain(genpd); - int i, ret, sw, sw2iso; - u32 val; + int i, ret; + u32 val, req; if (pd->supply) { ret = regulator_enable(pd->supply); @@ -113,17 +113,18 @@ static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd) regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS, 0x1, 0x1); - /* Read ISO and ISO2SW power up delays */ - regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val); - sw = val & 0x3f; - sw2iso = (val >> 8) & 0x3f; - /* Request GPC to power up domain */ - val = BIT(pd->cntr_pdn_bit + 1); - regmap_update_bits(pd->regmap, GPC_CNTR, val, val); + req = BIT(pd->cntr_pdn_bit + 1); + regmap_update_bits(pd->regmap, GPC_CNTR, req, req); - /* Wait ISO + ISO2SW IPG clock cycles */ - udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz)); + /* Wait for the PGC to handle the request */ + ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req), + 1, 50); + if (ret) + pr_err("powerup request on domain %s timed out\n", genpd->name); + + /* Wait for reset to propagate through peripherals */ + usleep_range(5, 10); /* Disable reset clocks for all devices in the domain */ for (i = 0; i < pd->num_clks; i++) @@ -345,6 +346,7 @@ static const struct regmap_config imx_gpc_regmap_config = { .rd_table = &access_table, .wr_table = &access_table, .max_register = 0x2ac, + .fast_io = true, }; static struct generic_pm_domain *imx_gpc_onecell_domains[] = { diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile index 22818c0fa4b27fb45fe49eafa9e3f62a821c1b41..e99ebb86a715a7b0f4f63f06f5ce711542928910 100644 --- a/drivers/staging/android/ion/Makefile +++ b/drivers/staging/android/ion/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_ION) += ion-alloc.o +CFLAGS_ion.o = -I$(src) ion-alloc-objs += ion.o ion-ioctl.o ion_heap.o ion-alloc-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o ion-alloc-$(CONFIG_ION_CARVEOUT_HEAP) += ion_carveout_heap.o diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 49bf964b2879a73e35aa5312c5ff1a809d387e07..5692828dd23d4bec80f866c4c5e9d40c75e0e7e5 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -29,6 +29,8 @@ #include #include +#define CREATE_TRACE_POINTS +#include "ion_trace.h" #include "ion.h" static struct ion_device *internal_dev; @@ -61,6 +63,20 @@ static void ion_buffer_add(struct ion_device *dev, rb_insert_color(&buffer->node, &dev->buffers); } +static void track_buffer_created(struct ion_buffer *buffer) +{ + long total = atomic_long_add_return(buffer->size, &total_heap_bytes); + + trace_ion_stat(buffer->sg_table, buffer->size, total); +} + +static void track_buffer_destroyed(struct ion_buffer *buffer) +{ + long total = atomic_long_sub_return(buffer->size, &total_heap_bytes); + + trace_ion_stat(buffer->sg_table, -buffer->size, total); +} + /* this function should only be called while dev->lock is held */ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, struct ion_device *dev, @@ -102,7 +118,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap 
*heap, mutex_lock(&dev->buffer_lock); ion_buffer_add(dev, buffer); mutex_unlock(&dev->buffer_lock); - atomic_long_add(len, &total_heap_bytes); + track_buffer_created(buffer); return buffer; err1: @@ -131,7 +147,7 @@ static void _ion_buffer_destroy(struct ion_buffer *buffer) mutex_lock(&dev->buffer_lock); rb_erase(&buffer->node, &dev->buffers); mutex_unlock(&dev->buffer_lock); - atomic_long_sub(buffer->size, &total_heap_bytes); + track_buffer_destroyed(buffer); if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) ion_heap_freelist_add(heap, buffer); diff --git a/drivers/staging/android/ion/ion_trace.h b/drivers/staging/android/ion/ion_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..8233691a73eb1b7ff2fa5a016b47a6eda80fcf41 --- /dev/null +++ b/drivers/staging/android/ion/ion_trace.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * drivers/staging/android/ion/ion-trace.h + * + * Copyright (C) 2020 Google, Inc. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ion + +#if !defined(_ION_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _ION_TRACE_H + +#include + +#ifndef __ION_PTR_TO_HASHVAL +static unsigned int __ion_ptr_to_hash(const void *ptr) +{ + unsigned long hashval; + + if (ptr_to_hashval(ptr, &hashval)) + return 0; + + /* The hashed value is only 32-bit */ + return (unsigned int)hashval; +} + +#define __ION_PTR_TO_HASHVAL +#endif + +TRACE_EVENT(ion_stat, + TP_PROTO(const void *addr, long len, + unsigned long total_allocated), + TP_ARGS(addr, len, total_allocated), + TP_STRUCT__entry(__field(unsigned int, buffer_id) + __field(long, len) + __field(unsigned long, total_allocated) + ), + TP_fast_assign(__entry->buffer_id = __ion_ptr_to_hash(addr); + __entry->len = len; + __entry->total_allocated = total_allocated; + ), + TP_printk("buffer_id=%u len=%ldB total_allocated=%ldB", + __entry->buffer_id, + __entry->len, + __entry->total_allocated) + ); + +#endif /* _ION_TRACE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#define TRACE_INCLUDE_FILE ion_trace +#include diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index e18b61cdbdebac86adf0376cd1314576a62de699..636988248da2cd54a3f17c9ddf94ec3571e15190 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -2594,8 +2594,10 @@ static int comedi_open(struct inode *inode, struct file *file) } cfp = kzalloc(sizeof(*cfp), GFP_KERNEL); - if (!cfp) + if (!cfp) { + comedi_dev_put(dev); return -ENOMEM; + } cfp->dev = dev; diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c index 83026ba63d1cc57a5fb0c727b72c22b2791e4f08..78a7c1b3448ab1088cf959bff6d4df4d85af0417 100644 --- a/drivers/staging/comedi/drivers/dt2815.c +++ b/drivers/staging/comedi/drivers/dt2815.c @@ -92,6 +92,7 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s, int ret; for (i = 0; i < insn->n; i++) { + /* FIXME: lo bit 0 chooses voltage output or current output */ lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01; hi = (data[i] & 0xff0) >> 4; @@ -105,6 +106,8 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s, if (ret) return ret; + outb(hi, dev->iobase + DT2815_DATA); + devpriv->ao_readback[chan] = data[i]; } return i; diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c index 2d96820da62ecb3e3ad94d9a90cd7a1a9c997749..4de9c39535eb9e0a1275518a703d7beb6812dafd 100644 --- a/drivers/staging/erofs/utils.c +++ b/drivers/staging/erofs/utils.c @@ -309,7 +309,7 @@ unsigned long erofs_shrink_scan(struct shrinker *shrink, sbi->shrinker_run_no = run_no; #ifdef CONFIG_EROFS_FS_ZIP - freed += erofs_shrink_workstation(sbi, nr, false); + freed += erofs_shrink_workstation(sbi, nr - freed, false); #endif spin_lock(&erofs_sb_list_lock); diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c index 0cef1d6d2e2b0c9963621e8a476c6c1fcc23c0b3..99da735bdf6d29f1a4a349c5a57a717097f75aa6 100644 --- a/drivers/staging/gasket/apex_driver.c +++ b/drivers/staging/gasket/apex_driver.c @@ -578,13 +578,6 @@ static const struct pci_device_id apex_pci_ids[] = { { PCI_DEVICE(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID) }, { 0 } }; -static void apex_pci_fixup_class(struct pci_dev *pdev) -{ - pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class; -} -DECLARE_PCI_FIXUP_CLASS_HEADER(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID, - PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class); - static int apex_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c index d12ab560411f704bd6f42c4094392ea7522f91d7..9396aeb3f431b8b76fac534e453f65625898cc90 100644 --- a/drivers/staging/gasket/gasket_core.c +++ b/drivers/staging/gasket/gasket_core.c @@ -933,6 +933,10 @@ do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma, gasket_get_bar_index(gasket_dev, (vma->vm_pgoff << PAGE_SHIFT) + driver_desc->legacy_mmap_address_offset); + + if (bar_index < 0) + return DO_MAP_REGION_INVALID; + phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset; while (mapped_bytes < map_length) { /* diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c index af0060c74530eece3e57e9bf097a85818275dc68..43f461f75b971aeb3f7730a3f0250401bcae95f7 100644 --- a/drivers/staging/vt6656/int.c +++ b/drivers/staging/vt6656/int.c @@ -143,7 +143,8 @@ void vnt_int_process_data(struct vnt_private *priv) 
priv->wake_up_count = priv->hw->conf.listen_interval; - --priv->wake_up_count; + if (priv->wake_up_count) + --priv->wake_up_count; /* Turn on wake up to listen next beacon */ if (priv->wake_up_count == 1) diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c index 91dede54cc1f5c2ea533d1895541ffa5512c0f3b..be8dbf6c2c2f302aec5a0261ba0c36465b867494 100644 --- a/drivers/staging/vt6656/key.c +++ b/drivers/staging/vt6656/key.c @@ -81,9 +81,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr, case VNT_KEY_PAIRWISE: key_mode |= mode; key_inx = 4; - /* Don't save entry for pairwise key for station mode */ - if (priv->op_mode == NL80211_IFTYPE_STATION) - clear_bit(entry, &priv->key_entry_inuse); break; default: return -EINVAL; @@ -107,7 +104,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr, int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct ieee80211_vif *vif, struct ieee80211_key_conf *key) { - struct ieee80211_bss_conf *conf = &vif->bss_conf; struct vnt_private *priv = hw->priv; u8 *mac_addr = NULL; u8 key_dec_mode = 0; @@ -149,16 +145,12 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta, key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; } - if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE, key_dec_mode, true); - } else { - vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY, + else + vnt_set_keymode(hw, mac_addr, key, VNT_KEY_GROUP_ADDRESS, key_dec_mode, true); - vnt_set_keymode(hw, (u8 *)conf->bssid, key, - VNT_KEY_GROUP_ADDRESS, key_dec_mode, true); - } - return 0; } diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 36562ac94c1f892096440ae7d122c0a62a3fcd72..586a3d331511c130e74f974d6cc45b37c587d9a2 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -595,8 +595,6 @@ static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) priv->op_mode = vif->type; - vnt_set_bss_mode(priv); - /* LED blink on TX */ vnt_mac_set_led(priv, LEDSTS_STS, LEDSTS_INTER); @@ -683,7 +681,6 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, priv->basic_rates = conf->basic_rates; vnt_update_top_rates(priv); - vnt_set_bss_mode(priv); dev_dbg(&priv->usb->dev, "basic rates %x\n", conf->basic_rates); } @@ -712,11 +709,14 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, priv->short_slot_time = false; vnt_set_short_slot_time(priv); - vnt_update_ifs(priv); vnt_set_vga_gain_offset(priv, priv->bb_vga[0]); vnt_update_pre_ed_threshold(priv, false); } + if (changed & (BSS_CHANGED_BASIC_RATES | BSS_CHANGED_ERP_PREAMBLE | + BSS_CHANGED_ERP_SLOT)) + vnt_set_bss_mode(priv); + if (changed & BSS_CHANGED_TXPOWER) vnt_rf_setpower(priv, priv->current_rate, conf->chandef.chan->hw_value); @@ -740,12 +740,15 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, vnt_mac_reg_bits_on(priv, MAC_REG_TFTCTL, TFTCTL_TSFCNTREN); - vnt_adjust_tsf(priv, conf->beacon_rate->hw_value, - conf->sync_tsf, priv->current_tsf); - vnt_mac_set_beacon_interval(priv, conf->beacon_int); vnt_reset_next_tbtt(priv, conf->beacon_int); + + vnt_adjust_tsf(priv, conf->beacon_rate->hw_value, + conf->sync_tsf, priv->current_tsf); + + vnt_update_next_tbtt(priv, + conf->sync_tsf, conf->beacon_int); } else { vnt_clear_current_tsf(priv); @@ -780,15 +783,11 @@ static void vnt_configure(struct ieee80211_hw *hw, { struct vnt_private *priv = hw->priv; u8 rx_mode = 
0; - int rc; *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC; - rc = vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR, - MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode); - - if (!rc) - rx_mode = RCR_MULTICAST | RCR_BROADCAST; + vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR, + MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode); dev_dbg(&priv->usb->dev, "rx mode in = %x\n", rx_mode); @@ -829,8 +828,12 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; break; case DISABLE_KEY: - if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) + if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) { clear_bit(key->hw_key_idx, &priv->key_entry_inuse); + + vnt_mac_disable_keyentry(priv, key->hw_key_idx); + } + default: break; } diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 03e9cb156df94990d12ed2d0b3d4044bf0ede86a..1633e26662687e6f41ff076f0150227fa4b06278 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4275,30 +4275,37 @@ int iscsit_close_connection( if (!atomic_read(&sess->session_reinstatement) && atomic_read(&sess->session_fall_back_to_erl0)) { spin_unlock_bh(&sess->conn_lock); + complete_all(&sess->session_wait_comp); iscsit_close_session(sess); return 0; } else if (atomic_read(&sess->session_logout)) { pr_debug("Moving to TARG_SESS_STATE_FREE.\n"); sess->session_state = TARG_SESS_STATE_FREE; - spin_unlock_bh(&sess->conn_lock); - if (atomic_read(&sess->sleep_on_sess_wait_comp)) - complete(&sess->session_wait_comp); + if (atomic_read(&sess->session_close)) { + spin_unlock_bh(&sess->conn_lock); + complete_all(&sess->session_wait_comp); + iscsit_close_session(sess); + } else { + spin_unlock_bh(&sess->conn_lock); + } return 0; } else { pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); sess->session_state = TARG_SESS_STATE_FAILED; - if (!atomic_read(&sess->session_continuation)) { - spin_unlock_bh(&sess->conn_lock); + if (!atomic_read(&sess->session_continuation)) iscsit_start_time2retain_handler(sess); - } else - spin_unlock_bh(&sess->conn_lock); - if (atomic_read(&sess->sleep_on_sess_wait_comp)) - complete(&sess->session_wait_comp); + if (atomic_read(&sess->session_close)) { + spin_unlock_bh(&sess->conn_lock); + complete_all(&sess->session_wait_comp); + iscsit_close_session(sess); + } else { + spin_unlock_bh(&sess->conn_lock); + } return 0; } @@ -4404,9 +4411,9 @@ static void iscsit_logout_post_handler_closesession( complete(&conn->conn_logout_comp); iscsit_dec_conn_usage_count(conn); + atomic_set(&sess->session_close, 1); iscsit_stop_session(sess, sleep, sleep); iscsit_dec_session_usage_count(sess); - iscsit_close_session(sess); } static void iscsit_logout_post_handler_samecid( @@ -4541,49 +4548,6 @@ void iscsit_fail_session(struct iscsi_session *sess) sess->session_state = TARG_SESS_STATE_FAILED; } -int iscsit_free_session(struct iscsi_session *sess) -{ - u16 conn_count = atomic_read(&sess->nconn); - struct iscsi_conn *conn, *conn_tmp = NULL; - int is_last; - - spin_lock_bh(&sess->conn_lock); - atomic_set(&sess->sleep_on_sess_wait_comp, 1); - - list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, - conn_list) { - if (conn_count == 0) - break; - - if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) { - is_last = 1; - } else { - iscsit_inc_conn_usage_count(conn_tmp); - is_last = 0; - } - iscsit_inc_conn_usage_count(conn); - - spin_unlock_bh(&sess->conn_lock); - iscsit_cause_connection_reinstatement(conn, 1); - 
spin_lock_bh(&sess->conn_lock); - - iscsit_dec_conn_usage_count(conn); - if (is_last == 0) - iscsit_dec_conn_usage_count(conn_tmp); - - conn_count--; - } - - if (atomic_read(&sess->nconn)) { - spin_unlock_bh(&sess->conn_lock); - wait_for_completion(&sess->session_wait_comp); - } else - spin_unlock_bh(&sess->conn_lock); - - iscsit_close_session(sess); - return 0; -} - void iscsit_stop_session( struct iscsi_session *sess, int session_sleep, @@ -4594,8 +4558,6 @@ void iscsit_stop_session( int is_last; spin_lock_bh(&sess->conn_lock); - if (session_sleep) - atomic_set(&sess->sleep_on_sess_wait_comp, 1); if (connection_sleep) { list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, @@ -4653,12 +4615,15 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) spin_lock(&sess->conn_lock); if (atomic_read(&sess->session_fall_back_to_erl0) || atomic_read(&sess->session_logout) || + atomic_read(&sess->session_close) || (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { spin_unlock(&sess->conn_lock); continue; } + iscsit_inc_session_usage_count(sess); atomic_set(&sess->session_reinstatement, 1); atomic_set(&sess->session_fall_back_to_erl0, 1); + atomic_set(&sess->session_close, 1); spin_unlock(&sess->conn_lock); list_move_tail(&se_sess->sess_list, &free_list); @@ -4668,7 +4633,9 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) { sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; - iscsit_free_session(sess); + list_del_init(&se_sess->sess_list); + iscsit_stop_session(sess, 1, 1); + iscsit_dec_session_usage_count(sess); session_count++; } diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h index 48bac0acf8c76e055c022bc2b74423cca50b04bf..11a481cf6eadefabc14c46607df41a513f400bc2 100644 --- a/drivers/target/iscsi/iscsi_target.h +++ b/drivers/target/iscsi/iscsi_target.h @@ -43,7 +43,6 @@ extern int iscsi_target_rx_thread(void *); extern int iscsit_close_connection(struct iscsi_conn *); extern int iscsit_close_session(struct iscsi_session *); extern void iscsit_fail_session(struct iscsi_session *); -extern int iscsit_free_session(struct iscsi_session *); extern void iscsit_stop_session(struct iscsi_session *, int, int); extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int); diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 95d0a22b2ad61d362600a2df0c7fc80b98269c03..d25cadc4f4f11f7f476b127c8086c53064d4adcb 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -1501,20 +1501,23 @@ static void lio_tpg_close_session(struct se_session *se_sess) spin_lock(&sess->conn_lock); if (atomic_read(&sess->session_fall_back_to_erl0) || atomic_read(&sess->session_logout) || + atomic_read(&sess->session_close) || (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { spin_unlock(&sess->conn_lock); spin_unlock_bh(&se_tpg->session_lock); return; } + iscsit_inc_session_usage_count(sess); atomic_set(&sess->session_reinstatement, 1); atomic_set(&sess->session_fall_back_to_erl0, 1); + atomic_set(&sess->session_close, 1); spin_unlock(&sess->conn_lock); iscsit_stop_time2retain_timer(sess); spin_unlock_bh(&se_tpg->session_lock); iscsit_stop_session(sess, 1, 1); - iscsit_close_session(sess); + iscsit_dec_session_usage_count(sess); } static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg) diff --git 
a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index bb90c80ff3889bdde58df0877a6f7c603bc3eb05..f25049ba4a85b13ac3c77e8cab88ed969de5feb2 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -164,6 +164,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) spin_lock(&sess_p->conn_lock); if (atomic_read(&sess_p->session_fall_back_to_erl0) || atomic_read(&sess_p->session_logout) || + atomic_read(&sess_p->session_close) || (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { spin_unlock(&sess_p->conn_lock); continue; @@ -174,6 +175,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) (sess_p->sess_ops->SessionType == sessiontype))) { atomic_set(&sess_p->session_reinstatement, 1); atomic_set(&sess_p->session_fall_back_to_erl0, 1); + atomic_set(&sess_p->session_close, 1); spin_unlock(&sess_p->conn_lock); iscsit_inc_session_usage_count(sess_p); iscsit_stop_time2retain_timer(sess_p); @@ -198,7 +200,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) if (sess->session_state == TARG_SESS_STATE_FAILED) { spin_unlock_bh(&sess->conn_lock); iscsit_dec_session_usage_count(sess); - iscsit_close_session(sess); return 0; } spin_unlock_bh(&sess->conn_lock); @@ -206,7 +207,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) iscsit_stop_session(sess, 1, 1); iscsit_dec_session_usage_count(sess); - iscsit_close_session(sess); return 0; } @@ -494,6 +494,7 @@ static int iscsi_login_non_zero_tsih_s2( sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr; if (atomic_read(&sess_p->session_fall_back_to_erl0) || atomic_read(&sess_p->session_logout) || + atomic_read(&sess_p->session_close) || (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) continue; if (!memcmp(sess_p->isid, pdu->isid, 6) && diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 10fae26b44addf7a12dafa3a47a03ad035669941..939c6212d2ac50af8c00216a6f474b5a1cc9a71f 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -76,7 +76,7 @@ static int fc_get_pr_transport_id( * encoded TransportID. 
*/ ptr = &se_nacl->initiatorname[0]; - for (i = 0; i < 24; ) { + for (i = 0; i < 23; ) { if (!strncmp(&ptr[i], ":", 1)) { i++; continue; diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 854b2bcca7c1a46f213d9d8bd31aa4fb68028626..5668d02de10b0d933fd9de5a93d57652846ff08c 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -445,7 +445,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd) target_to_linux_sector(dev, cmd->t_task_lba), target_to_linux_sector(dev, sbc_get_write_same_sectors(cmd)), - GFP_KERNEL, false); + GFP_KERNEL, BLKDEV_ZERO_NOUNMAP); if (ret) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 7ee0a75ce4526518dcb4ed18d1bed5e2a357ebf1..eff1e36ca03c2cfbe56d2a444c23fc87d008cbb6 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -2067,6 +2067,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) mb->cmd_tail = 0; mb->cmd_head = 0; tcmu_flush_dcache_range(mb, sizeof(*mb)); + clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); del_timer(&udev->cmd_timer); diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 438f62129b5b2db91667b57c94c5fe7cda7e37aa..be1195fdce80220b540fe94d042917f811a48655 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -26,12 +26,12 @@ #include #include #include -#include #include #include #include #include #include +#include #include @@ -91,9 +91,9 @@ struct cpufreq_cooling_device { struct cpufreq_policy *policy; struct list_head node; struct time_in_idle *idle_time; + struct cpu_cooling_ops *plat_ops; }; -static DEFINE_IDA(cpufreq_ida); static DEFINE_MUTEX(cooling_list_lock); static LIST_HEAD(cpufreq_cdev_list); @@ -342,7 +342,16 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, cpufreq_cdev->cpufreq_state = state; cpufreq_cdev->clipped_freq = clip_freq; - cpufreq_update_policy(cpufreq_cdev->policy->cpu); + /* Check if the device has a platform mitigation function that + * can handle the CPU freq mitigation; if not, notify the cpufreq + * framework. + */ + if (cpufreq_cdev->plat_ops && + cpufreq_cdev->plat_ops->ceil_limit) + cpufreq_cdev->plat_ops->ceil_limit(cpufreq_cdev->policy->cpu, + clip_freq); + else + cpufreq_update_policy(cpufreq_cdev->policy->cpu); return 0; } @@ -524,6 +533,9 @@ static struct notifier_block thermal_cpufreq_notifier_block = { * @policy: cpufreq policy * Normally this should be same as cpufreq policy->related_cpus. * @try_model: true if a power model should be used + * @plat_ops: platform ops that perform the mitigation by changing the + * frequencies (Optional). By default, the cpufreq framework will + * be notified of the new limits. + * * This interface function registers the cpufreq cooling device with the name * "thermal-cpufreq-%x".
This api can support multiple instances of cpufreq @@ -535,13 +547,13 @@ */ static struct thermal_cooling_device * __cpufreq_cooling_register(struct device_node *np, - struct cpufreq_policy *policy, bool try_model) + struct cpufreq_policy *policy, bool try_model, + struct cpu_cooling_ops *plat_ops) { struct thermal_cooling_device *cdev; struct cpufreq_cooling_device *cpufreq_cdev; char dev_name[THERMAL_NAME_LENGTH]; unsigned int i, num_cpus; - int ret; struct thermal_cooling_device_ops *cooling_ops; bool first; @@ -589,20 +601,16 @@ __cpufreq_cooling_register(struct device_node *np, #endif cooling_ops = &cpufreq_cooling_ops; - ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL); - if (ret < 0) { - cdev = ERR_PTR(ret); - goto free_idle_time; - } - cpufreq_cdev->id = ret; + cpufreq_cdev->id = policy->cpu; snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", cpufreq_cdev->id); + cpufreq_cdev->plat_ops = plat_ops; cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev, cooling_ops); if (IS_ERR(cdev)) - goto remove_ida; + goto free_idle_time; cpufreq_cdev->clipped_freq = get_state_freq(cpufreq_cdev, 0); cpufreq_cdev->cdev = cdev; @@ -619,8 +627,6 @@ __cpufreq_cooling_register(struct device_node *np, return cdev; -remove_ida: - ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id); free_idle_time: kfree(cpufreq_cdev->idle_time); free_cdev: @@ -642,7 +648,7 @@ __cpufreq_cooling_register(struct device_node *np, struct thermal_cooling_device * cpufreq_cooling_register(struct cpufreq_policy *policy) { - return __cpufreq_cooling_register(NULL, policy, false); + return __cpufreq_cooling_register(NULL, policy, false, NULL); } EXPORT_SYMBOL_GPL(cpufreq_cooling_register); @@ -678,7 +684,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy) } if (of_find_property(np, "#cooling-cells", NULL)) { - cdev = __cpufreq_cooling_register(np, policy, true); + cdev = __cpufreq_cooling_register(np, policy, true, NULL); if (IS_ERR(cdev)) { pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n", policy->cpu, PTR_ERR(cdev)); @@ -691,6 +697,37 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy) } EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register); +/** + * cpufreq_platform_cooling_register() - create cpufreq cooling device with + * additional platform specific mitigation function. + * + * @policy: cpufreq policy for the cpus where the frequency constraints will happen + * @plat_ops: the platform mitigation functions that will be called instead of + * cpufreq, if provided. + * + * Return: a valid struct thermal_cooling_device pointer on success; + * on failure, it returns a corresponding ERR_PTR(). + */ +struct thermal_cooling_device * +cpufreq_platform_cooling_register(struct cpufreq_policy *policy, + struct cpu_cooling_ops *plat_ops) +{ + struct device_node *cpu_node; + struct thermal_cooling_device *cdev = NULL; + + cpu_node = of_cpu_device_node_get(policy->cpu); + if (!cpu_node) { + pr_err("No cpu node\n"); + return ERR_PTR(-EINVAL); + } + cdev = __cpufreq_cooling_register(cpu_node, policy, false, + plat_ops); + + of_node_put(cpu_node); + return cdev; +} +EXPORT_SYMBOL(cpufreq_platform_cooling_register); + /** * cpufreq_cooling_unregister - function to remove cpufreq cooling device. * @cdev: thermal cooling device pointer.
@@ -718,7 +755,6 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) CPUFREQ_POLICY_NOTIFIER); thermal_cooling_device_unregister(cpufreq_cdev->cdev); - ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id); kfree(cpufreq_cdev->idle_time); kfree(cpufreq_cdev); } diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 2fa7e1fbad011d73d22c3a894b24d1a7a454e974..dda85ee66df09bc77d8bbaeabaa68e4bd5aae6cd 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -649,6 +649,7 @@ thermal_zone_of_add_sensor(struct device_node *zone, if (sens_param->ops->set_emul_temp) tzd->ops->set_emul_temp = of_thermal_set_emul_temp; + list_add_tail(&tz->list, &sens_param->first_tz); mutex_unlock(&tzd->lock); return tzd; @@ -683,7 +684,9 @@ thermal_zone_of_add_sensor(struct device_node *zone, * that refer to it. * * Return: On success returns a valid struct thermal_zone_device, - * otherwise, it returns a corresponding ERR_PTR(). Caller must + * otherwise, it returns a corresponding ERR_PTR(). In case there are multiple + * thermal zones referencing the same sensor, the return value will be + * the thermal_zone_device pointer of the first thermal zone. Caller must * check the return value with help of IS_ERR() helper. */ struct thermal_zone_device * @@ -692,6 +695,7 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data, { struct device_node *np, *child, *sensor_np; struct thermal_zone_device *tzd = ERR_PTR(-ENODEV); + struct thermal_zone_device *first_tzd = NULL; struct __sensor_param *sens_param = NULL; np = of_find_node_by_name(NULL, "thermal-zones"); @@ -710,11 +714,16 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data, } sens_param->sensor_data = data; sens_param->ops = ops; + INIT_LIST_HEAD(&sens_param->first_tz); + sens_param->trip_high = INT_MAX; + sens_param->trip_low = INT_MIN; + mutex_init(&sens_param->lock); sensor_np = of_node_get(dev->of_node); for_each_available_child_of_node(np, child) { struct of_phandle_args sensor_specs; int ret, id; + struct __thermal_zone *tz; /* For now, thermal framework supports only 1 sensor per zone */ ret = of_parse_phandle_with_args(child, "thermal-sensors", @@ -735,22 +744,25 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data, if (sensor_specs.np == sensor_np && id == sensor_id) { tzd = thermal_zone_of_add_sensor(child, sensor_np, sens_param); - if (!IS_ERR(tzd)) - tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED); - - of_node_put(sensor_specs.np); - of_node_put(child); - goto exit; + if (!IS_ERR(tzd)) { + if (!first_tzd) + first_tzd = tzd; + tz = tzd->devdata; + if (!tz->default_disable) + tzd->ops->set_mode(tzd, + THERMAL_DEVICE_ENABLED); + } } of_node_put(sensor_specs.np); } -exit: of_node_put(sensor_np); of_node_put(np); - if (tzd == ERR_PTR(-ENODEV)) + if (!first_tzd) { + first_tzd = ERR_PTR(-ENODEV); kfree(sens_param); - return tzd; + } + return first_tzd; } EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_register); @@ -772,7 +784,9 @@ EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_register); void thermal_zone_of_sensor_unregister(struct device *dev, struct thermal_zone_device *tzd) { - struct __thermal_zone *tz; + struct __thermal_zone *tz, *next; + struct thermal_zone_device *pos; + struct list_head *head; if (!dev || !tzd || !tzd->devdata) return; @@ -783,14 +797,20 @@ void thermal_zone_of_sensor_unregister(struct device *dev, if (!tz) return; - mutex_lock(&tzd->lock); - tzd->ops->get_temp = NULL; - tzd->ops->get_trend = NULL; -
tzd->ops->set_emul_temp = NULL; - - kfree(tz->senps); - tz->senps = NULL; - mutex_unlock(&tzd->lock); + head = &tz->senps->first_tz; + list_for_each_entry_safe(tz, next, head, list) { + pos = tz->tzd; + mutex_lock(&pos->lock); + pos->ops->get_temp = NULL; + pos->ops->get_trend = NULL; + pos->ops->set_emul_temp = NULL; + + list_del(&tz->list); + if (list_empty(&tz->senps->first_tz)) + kfree(tz->senps); + tz->senps = NULL; + mutex_unlock(&pos->lock); + } } EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_unregister); @@ -1191,6 +1211,7 @@ __init *thermal_of_build_thermal_zone(struct device_node *np) if (!tz) return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&tz->list); ret = of_property_read_u32(np, "polling-delay-passive", &prop); if (ret < 0) { pr_err("missing polling-delay-passive property\n"); @@ -1207,6 +1228,8 @@ __init *thermal_of_build_thermal_zone(struct device_node *np) tz->is_wakeable = of_property_read_bool(np, "wake-capable-sensor"); + tz->default_disable = of_property_read_bool(np, + "disable-thermal-zone"); /* * REVIST: for now, the thermal framework supports only * one sensor per thermal zone. Thus, we are considering @@ -1383,7 +1406,9 @@ int __init of_parse_thermal_zones(void) kfree(ops); of_thermal_free_zone(tz); /* attempting to build remaining zones still */ + continue; } + tz->tzd = zone; } of_node_put(np); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 6d940cb8cebd93cf0f03a8d9dc24928f6ae8278c..b678c3d2c25ba9c856d33a80f791543e720ef2a2 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -767,8 +767,9 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, sprintf(dev->attr_name, "cdev%d_trip_point", dev->id); sysfs_attr_init(&dev->attr.attr); dev->attr.attr.name = dev->attr_name; - dev->attr.attr.mode = 0444; + dev->attr.attr.mode = 0644; dev->attr.show = trip_point_show; + dev->attr.store = trip_point_store; result = device_create_file(&tz->device, &dev->attr); if (result) goto remove_symbol_link; @@ -972,7 +973,7 @@ static void bind_cdev(struct thermal_cooling_device *cdev) */ static struct thermal_cooling_device * __thermal_cooling_device_register(struct device_node *np, - char *type, void *devdata, + const char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { struct thermal_cooling_device *cdev; @@ -1046,7 +1047,7 @@ __thermal_cooling_device_register(struct device_node *np, * ERR_PTR. Caller must check return value with IS_ERR*() helpers. 
*/ struct thermal_cooling_device * -thermal_cooling_device_register(char *type, void *devdata, +thermal_cooling_device_register(const char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { return __thermal_cooling_device_register(NULL, type, devdata, ops); @@ -1070,7 +1071,7 @@ EXPORT_SYMBOL_GPL(thermal_cooling_device_register); */ struct thermal_cooling_device * thermal_of_cooling_device_register(struct device_node *np, - char *type, void *devdata, + const char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { return __thermal_cooling_device_register(np, type, devdata, ops); diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index d1183139e433bee91bbeaa6a7f0946a6e73ec381..ee35e08be46561ff547a15de94e44e042013a590 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -61,6 +61,8 @@ void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *); void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev); /* used only at binding time */ ssize_t trip_point_show(struct device *, struct device_attribute *, char *); +ssize_t trip_point_store(struct device *, struct device_attribute *, + const char *, size_t); ssize_t weight_show(struct device *, struct device_attribute *, char *); ssize_t weight_store(struct device *, struct device_attribute *, const char *, size_t); diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index aa99edb4dff7dca35439ed9536dee82f1e1c5b8c..80c8bae6dd1c2462b6cf8838c9623267bea94c4c 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -977,6 +977,26 @@ trip_point_show(struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%d\n", instance->trip); } +ssize_t trip_point_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct thermal_instance *instance; + int ret, trip; + + ret = kstrtoint(buf, 0, &trip); + if (ret) + return ret; + + instance = container_of(attr, struct thermal_instance, attr); + + if (trip >= instance->tz->trips || trip < THERMAL_TRIPS_NONE) + return -EINVAL; + + instance->trip = trip; + + return count; +} + ssize_t weight_show(struct device *dev, struct device_attribute *attr, char *buf) { diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c index eea4049b5dcc67836ec99f2f25faa8c4a24ccd4d..ca5004ae30245c5209389a688be7ec3a20ce1fe5 100644 --- a/drivers/tty/ehv_bytechan.c +++ b/drivers/tty/ehv_bytechan.c @@ -136,6 +136,21 @@ static int find_console_handle(void) return 1; } +static unsigned int local_ev_byte_channel_send(unsigned int handle, + unsigned int *count, + const char *p) +{ + char buffer[EV_BYTE_CHANNEL_MAX_BYTES]; + unsigned int c = *count; + + if (c < sizeof(buffer)) { + memcpy(buffer, p, c); + memset(&buffer[c], 0, sizeof(buffer) - c); + p = buffer; + } + return ev_byte_channel_send(handle, count, p); +} + /*************************** EARLY CONSOLE DRIVER ***************************/ #ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC @@ -154,7 +169,7 @@ static void byte_channel_spin_send(const char data) do { count = 1; - ret = ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE, + ret = local_ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE, &count, &data); } while (ret == EV_EAGAIN); } @@ -221,7 +236,7 @@ static int ehv_bc_console_byte_channel_send(unsigned int handle, const char *s, while (count) { len = min_t(unsigned int, count, EV_BYTE_CHANNEL_MAX_BYTES); do { - ret = 
ev_byte_channel_send(handle, &len, s); + ret = local_ev_byte_channel_send(handle, &len, s); } while (ret == EV_EAGAIN); count -= len; s += len; @@ -401,7 +416,7 @@ static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc) CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE), EV_BYTE_CHANNEL_MAX_BYTES); - ret = ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail); + ret = local_ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail); /* 'len' is valid only if the return code is 0 or EV_EAGAIN */ if (!ret || (ret == EV_EAGAIN)) diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index 27284a2dcd2b62395c5b140900ccca85a68ab683..436cc51c92c39765f9e9ba782fabecf62dda4830 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c @@ -302,10 +302,6 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops) vtermnos[index] = vtermno; cons_ops[index] = ops; - /* reserve all indices up to and including this index */ - if (last_hvc < index) - last_hvc = index; - /* check if we need to re-register the kernel console */ hvc_check_console(index); @@ -960,13 +956,22 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data, cons_ops[i] == hp->ops) break; - /* no matching slot, just use a counter */ - if (i >= MAX_NR_HVC_CONSOLES) - i = ++last_hvc; + if (i >= MAX_NR_HVC_CONSOLES) { + + /* find 'empty' slot for console */ + for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++) { + } + + /* no matching slot, just use a counter */ + if (i == MAX_NR_HVC_CONSOLES) + i = ++last_hvc + MAX_NR_HVC_CONSOLES; + } hp->index = i; - cons_ops[i] = ops; - vtermnos[i] = vtermno; + if (i < MAX_NR_HVC_CONSOLES) { + cons_ops[i] = ops; + vtermnos[i] = vtermno; + } list_add_tail(&(hp->next), &hvc_structs); mutex_unlock(&hvc_structs_mutex); diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c index 27aeca30eeae16845644a6edaf2f53d798ca2609..6133830f52a3c6ddd1ec9b7a70a3852972c83b77 100644 --- a/drivers/tty/rocket.c +++ b/drivers/tty/rocket.c @@ -632,18 +632,21 @@ init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev) tty_port_init(&info->port); info->port.ops = &rocket_port_ops; info->flags &= ~ROCKET_MODE_MASK; - switch (pc104[board][line]) { - case 422: - info->flags |= ROCKET_MODE_RS422; - break; - case 485: - info->flags |= ROCKET_MODE_RS485; - break; - case 232: - default: + if (board < ARRAY_SIZE(pc104) && line < ARRAY_SIZE(pc104_1)) + switch (pc104[board][line]) { + case 422: + info->flags |= ROCKET_MODE_RS422; + break; + case 485: + info->flags |= ROCKET_MODE_RS485; + break; + case 232: + default: + info->flags |= ROCKET_MODE_RS232; + break; + } + else info->flags |= ROCKET_MODE_RS232; - break; - } info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR; if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) { diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index df8bd0c7b97db06eff11016a8dd31448d0e97754..77d3c2bf2d52e22a17ff8ac623d9cf3011b02acc 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -989,6 +989,21 @@ config SERIAL_MSM_CONSOLE select SERIAL_CORE_CONSOLE select SERIAL_EARLYCON +config SERIAL_MSM_GENI_HALF_SAMPLING + bool "Changes clock divider which impacts sampling rate for QUP HW ver greater than 2.5.0" + help + Clock divider value should be doubled for QUP hardware version + greater than 2.5.0. + As earlycon can't have HW version awareness, decision is taken + based on the configuration. 
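To make the effect of this option concrete, here is a worked example using the constants from the early console driver added later in this patch (get_clk_div_rate() and UART_OVERSAMPLING): at 115200 baud the requested serial clock is 115200 * 32 = 3686400 Hz, which matches the 7372800 Hz root clock and gives a divider of 2; with SERIAL_MSM_GENI_HALF_SAMPLING enabled, the earlycon setup doubles that to 4 to compensate for the reduced sampling rate on QUP hardware versions above 2.5.0.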
+ +config SERIAL_MSM_GENI_EARLY_CONSOLE + bool "MSM on-chip GENI HW based early console support" + select SERIAL_MSM_GENI_HALF_SAMPLING + help + Serial early console driver for Qualcomm Technologies Inc's GENI + based QUP hardware. + config SERIAL_QCOM_GENI tristate "QCOM on-chip GENI based serial port support" depends on ARCH_QCOM || COMPILE_TEST diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index daac675612dff4804869460e7946210dd1eb7466..3f15dd459763bf59d3f9a50e82972aa772d46029 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -58,6 +58,7 @@ obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o obj-$(CONFIG_SERIAL_MSM) += msm_serial.o +obj-$(CONFIG_SERIAL_MSM_GENI_EARLY_CONSOLE) += msm_geni_serial_console.o obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o obj-$(CONFIG_SERIAL_NETX) += netx-serial.o obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o diff --git a/drivers/tty/serial/msm_geni_serial_console.c b/drivers/tty/serial/msm_geni_serial_console.c new file mode 100644 index 0000000000000000000000000000000000000000..345370b98ef6a455c8563e6352bb4d3149c612eb --- /dev/null +++ b/drivers/tty/serial/msm_geni_serial_console.c @@ -0,0 +1,566 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ +#include +#include +#include +#include + + +#define SE_UART_TX_TRANS_CFG (0x25C) +#define SE_UART_TX_WORD_LEN (0x268) +#define SE_UART_TX_STOP_BIT_LEN (0x26C) +#define SE_UART_TX_TRANS_LEN (0x270) +#define SE_UART_TX_PARITY_CFG (0x2A4) +/* SE_UART_TRANS_CFG */ +#define UART_CTS_MASK (BIT(1)) +/* UART M_CMD OP codes */ +#define UART_START_TX (0x1) + +#define UART_OVERSAMPLING (32) +#define DEF_FIFO_DEPTH_WORDS (16) +#define DEF_TX_WM (2) +#define DEF_FIFO_WIDTH_BITS (32) + +#define GENI_INIT_CFG_REVISION (0x0) +#define GENI_S_INIT_CFG_REVISION (0x4) +#define GENI_FORCE_DEFAULT_REG (0x20) +#define GENI_OUTPUT_CTRL (0x24) +#define GENI_CGC_CTRL (0x28) +#define SE_GENI_STATUS (0x40) +#define GENI_SER_M_CLK_CFG (0x48) +#define GENI_CLK_CTRL_RO (0x60) +#define GENI_IF_FIFO_DISABLE_RO (0x64) +#define GENI_FW_REVISION_RO (0x68) +#define SE_GENI_CLK_SEL (0x7C) +#define SE_GENI_BYTE_GRAN (0x254) +#define SE_GENI_TX_PACKING_CFG0 (0x260) +#define SE_GENI_TX_PACKING_CFG1 (0x264) +#define SE_GENI_M_CMD0 (0x600) +#define SE_GENI_M_CMD_CTRL_REG (0x604) +#define SE_GENI_M_IRQ_STATUS (0x610) +#define SE_GENI_M_IRQ_EN (0x614) +#define SE_GENI_M_IRQ_CLEAR (0x618) +#define SE_GENI_TX_FIFOn (0x700) +#define SE_GENI_TX_FIFO_STATUS (0x800) +#define SE_GENI_TX_WATERMARK_REG (0x80C) +#define SE_GENI_IOS (0x908) +#define SE_GENI_M_GP_LENGTH (0x910) +#define SE_IRQ_EN (0xE1C) +#define SE_HW_PARAM_0 (0xE24) +#define SE_HW_PARAM_1 (0xE28) + +/* GENI_OUTPUT_CTRL fields */ +#define DEFAULT_IO_OUTPUT_CTRL_MSK (GENMASK(6, 0)) + +/* GENI_FORCE_DEFAULT_REG fields */ +#define FORCE_DEFAULT (BIT(0)) + +/* GENI_CGC_CTRL fields */ +#define CFG_AHB_CLK_CGC_ON (BIT(0)) +#define CFG_AHB_WR_ACLK_CGC_ON (BIT(1)) +#define DATA_AHB_CLK_CGC_ON (BIT(2)) +#define SCLK_CGC_ON (BIT(3)) +#define TX_CLK_CGC_ON (BIT(4)) +#define RX_CLK_CGC_ON (BIT(5)) +#define EXT_CLK_CGC_ON (BIT(6)) +#define PROG_RAM_HCLK_OFF (BIT(8)) +#define PROG_RAM_SCLK_OFF (BIT(9)) +#define DEFAULT_CGC_EN (GENMASK(6, 0)) + +/* GENI_STATUS fields */ +#define M_GENI_CMD_ACTIVE (BIT(0)) + +/* GENI_SER_M_CLK_CFG/GENI_SER_S_CLK_CFG */ +#define SER_CLK_EN (BIT(0)) +#define CLK_DIV_MSK 
(GENMASK(15, 4)) +#define CLK_DIV_SHFT (4) + +/* CLK_CTRL_RO fields */ + +/* FIFO_IF_DISABLE_RO fields */ +#define FIFO_IF_DISABLE (BIT(0)) + +/* FW_REVISION_RO fields */ +#define FW_REV_PROTOCOL_MSK (GENMASK(15, 8)) +#define FW_REV_PROTOCOL_SHFT (8) +#define FW_REV_VERSION_MSK (GENMASK(7, 0)) + +/* GENI_CLK_SEL fields */ +#define CLK_SEL_MSK (GENMASK(2, 0)) + +/* SE_GENI_DMA_MODE_EN */ +#define GENI_DMA_MODE_EN (BIT(0)) + +/* GENI_M_CMD0 fields */ +#define M_OPCODE_MSK (GENMASK(31, 27)) +#define M_OPCODE_SHFT (27) +#define M_PARAMS_MSK (GENMASK(26, 0)) + +/* GENI_M_CMD_CTRL_REG */ +#define M_GENI_CMD_CANCEL BIT(2) +#define M_GENI_CMD_ABORT BIT(1) +#define M_GENI_DISABLE BIT(0) + +/* GENI_M_IRQ_EN fields */ +#define M_CMD_DONE_EN (BIT(0)) +#define M_CMD_OVERRUN_EN (BIT(1)) +#define M_ILLEGAL_CMD_EN (BIT(2)) +#define M_CMD_FAILURE_EN (BIT(3)) +#define M_CMD_CANCEL_EN (BIT(4)) +#define M_CMD_ABORT_EN (BIT(5)) +#define M_TIMESTAMP_EN (BIT(6)) +#define M_GP_SYNC_IRQ_0_EN (BIT(8)) +#define M_IO_DATA_DEASSERT_EN (BIT(22)) +#define M_IO_DATA_ASSERT_EN (BIT(23)) +#define M_TX_FIFO_RD_ERR_EN (BIT(28)) +#define M_TX_FIFO_WR_ERR_EN (BIT(29)) +#define M_TX_FIFO_WATERMARK_EN (BIT(30)) +#define M_SEC_IRQ_EN (BIT(31)) +#define M_COMMON_GENI_M_IRQ_EN (GENMASK(6, 1) | \ + M_IO_DATA_DEASSERT_EN | \ + M_IO_DATA_ASSERT_EN | M_TX_FIFO_RD_ERR_EN | \ + M_TX_FIFO_WR_ERR_EN) + + +/* GENI_TX_FIFO_STATUS fields */ +#define TX_FIFO_WC (GENMASK(27, 0)) + +/* SE_IRQ_EN fields */ +#define GENI_M_IRQ_EN (BIT(2)) + +/* SE_HW_PARAM_0 fields */ +#define TX_FIFO_WIDTH_MSK (GENMASK(29, 24)) +#define TX_FIFO_WIDTH_SHFT (24) +#define TX_FIFO_DEPTH_MSK (GENMASK(21, 16)) +#define TX_FIFO_DEPTH_SHFT (16) + +enum se_protocol_types { + NONE, + SPI, + UART, + I2C, + I3C, +}; + +static void geni_write_reg_earlycon(unsigned int value, + void __iomem *base, int offset) +{ + writel_relaxed(value, (base + offset)); +} + +static unsigned int geni_read_reg_earlycon(void __iomem *base, int offset) +{ + return readl_relaxed(base + offset); +} + +static int get_se_proto_earlycon(void __iomem *base) +{ + int proto; + + proto = ((geni_read_reg_earlycon(base, GENI_FW_REVISION_RO) + & FW_REV_PROTOCOL_MSK) >> FW_REV_PROTOCOL_SHFT); + return proto; +} + +static void se_get_packing_config_earlycon(int bpw, int pack_words, + bool msb_to_lsb, unsigned long *cfg0, unsigned long *cfg1) +{ + u32 cfg[4] = {0}; + int len; + int temp_bpw = bpw; + int idx_start = (msb_to_lsb ? (bpw - 1) : 0); + int idx = idx_start; + int idx_delta = (msb_to_lsb ? -BITS_PER_BYTE : BITS_PER_BYTE); + int ceil_bpw = ((bpw & (BITS_PER_BYTE - 1)) ? + ((bpw & ~(BITS_PER_BYTE - 1)) + BITS_PER_BYTE) : bpw); + int iter = (ceil_bpw * pack_words) >> 3; + int i; + + if (unlikely(iter <= 0 || iter > 4)) { + *cfg0 = 0; + *cfg1 = 0; + return; + } + + for (i = 0; i < iter; i++) { + len = (temp_bpw < BITS_PER_BYTE) ? + (temp_bpw - 1) : BITS_PER_BYTE - 1; + cfg[i] = ((idx << 5) | (msb_to_lsb << 4) | (len << 1)); + idx = ((temp_bpw - BITS_PER_BYTE) <= 0) ? + ((i + 1) * BITS_PER_BYTE) + idx_start : + idx + idx_delta; + temp_bpw = ((temp_bpw - BITS_PER_BYTE) <= 0) ? 
+ bpw : (temp_bpw - BITS_PER_BYTE); + } + cfg[iter - 1] |= 1; + *cfg0 = cfg[0] | (cfg[1] << 10); + *cfg1 = cfg[2] | (cfg[3] << 10); +} + +static int se_geni_irq_en_earlycon(void __iomem *base) +{ + unsigned int common_geni_m_irq_en; + + common_geni_m_irq_en = geni_read_reg_earlycon(base, + SE_GENI_M_IRQ_EN); + common_geni_m_irq_en |= M_COMMON_GENI_M_IRQ_EN; + geni_write_reg_earlycon(common_geni_m_irq_en, base, + SE_GENI_M_IRQ_EN); + return 0; +} + +static int se_io_set_mode_earlycon(void __iomem *base) +{ + unsigned int io_mode; + + io_mode = geni_read_reg_earlycon(base, SE_IRQ_EN); + + io_mode |= (GENI_M_IRQ_EN); + + geni_write_reg_earlycon(io_mode, base, SE_IRQ_EN); + return 0; +} + +static void se_io_init_earlycon(void __iomem *base) +{ + unsigned int io_op_ctrl; + unsigned int geni_cgc_ctrl; + + geni_cgc_ctrl = geni_read_reg_earlycon(base, GENI_CGC_CTRL); + geni_cgc_ctrl |= DEFAULT_CGC_EN; + io_op_ctrl = DEFAULT_IO_OUTPUT_CTRL_MSK; + geni_write_reg_earlycon(geni_cgc_ctrl, base, GENI_CGC_CTRL); + + geni_write_reg_earlycon(io_op_ctrl, base, GENI_OUTPUT_CTRL); + geni_write_reg_earlycon(FORCE_DEFAULT, base, + GENI_FORCE_DEFAULT_REG); +} + +static int geni_se_init_earlycon(void __iomem *base, unsigned int rx_wm, + unsigned int rx_rfr) +{ + int ret; + + se_io_init_earlycon(base); + ret = se_io_set_mode_earlycon(base); + if (ret) + return ret; + + ret = se_geni_irq_en_earlycon(base); + return ret; +} + +static int geni_se_select_fifo_mode_earlycon(void __iomem *base) +{ + unsigned int common_geni_m_irq_en; + + geni_write_reg_earlycon(0xFFFFFFFF, base, SE_GENI_M_IRQ_CLEAR); + geni_write_reg_earlycon(0xFFFFFFFF, base, SE_IRQ_EN); + + common_geni_m_irq_en = geni_read_reg_earlycon(base, + SE_GENI_M_IRQ_EN); + geni_write_reg_earlycon(common_geni_m_irq_en, base, + SE_GENI_M_IRQ_EN); + return 0; +} + +struct msm_geni_serial_earlycon_port { + struct uart_port uport; + char name[20]; + unsigned int tx_fifo_depth; + unsigned int tx_fifo_width; + unsigned int tx_wm; + unsigned int xmit_size; + void *console_log; + unsigned int cur_baud; +}; + +#define GET_DEV_PORT(uport) \ + container_of(uport, struct msm_geni_serial_earlycon_port, uport) + +static int get_clk_cfg(unsigned long clk_freq, unsigned long *ser_clk) +{ + unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200, + 32000000, 48000000, 64000000, 80000000, 96000000, 100000000, + 102400000, 112000000, 120000000, 128000000}; + int i; + int match = -1; + + for (i = 0; i < ARRAY_SIZE(root_freq); i++) { + if (clk_freq > root_freq[i]) + continue; + + if (!(root_freq[i] % clk_freq)) { + match = i; + break; + } + } + if (match != -1) + *ser_clk = root_freq[match]; + + return match; +} + + +static int get_clk_div_rate(unsigned int baud, unsigned long *desired_clk_rate) +{ + unsigned long ser_clk; + int dfs_index; + int clk_div = 0; + + *desired_clk_rate = baud * UART_OVERSAMPLING; + dfs_index = get_clk_cfg(*desired_clk_rate, &ser_clk); + if (dfs_index < 0) { + clk_div = -EINVAL; + goto exit_get_clk_div_rate; + } + + clk_div = ser_clk / *desired_clk_rate; + *desired_clk_rate = ser_clk; +exit_get_clk_div_rate: + return clk_div; +} + + +static void msm_geni_serial_wr_char(struct uart_port *uport, int ch) +{ + geni_write_reg_earlycon(ch, uport->membase, SE_GENI_TX_FIFOn); + /* + * Ensure FIFO write clear goes through before + * next iteration. 
+ */ + mb(); + +} + +static int msm_geni_serial_poll_bit(struct uart_port *uport, + int offset, int bit_field, bool set) +{ + int iter = 0; + unsigned int reg; + bool met = false; + struct msm_geni_serial_earlycon_port *port = NULL; + bool cond = false; + unsigned int baud = 115200; + unsigned int fifo_bits = DEF_FIFO_DEPTH_WORDS * DEF_FIFO_WIDTH_BITS; + unsigned long total_iter = 1000; + + + if (uport->private_data && !uart_console(uport)) { + port = GET_DEV_PORT(uport); + baud = (port->cur_baud ? port->cur_baud : 115200); + fifo_bits = port->tx_fifo_depth * port->tx_fifo_width; + /* + * Total polling iterations based on FIFO worth of bytes to be + * sent at current baud .Add a little fluff to the wait. + */ + total_iter = ((fifo_bits * USEC_PER_SEC) / baud) / 10; + total_iter += 50; + } + + while (iter < total_iter) { + reg = geni_read_reg_earlycon(uport->membase, offset); + cond = reg & bit_field; + if (cond == set) { + met = true; + break; + } + udelay(10); + iter++; + } + return met; +} + +static void msm_geni_serial_poll_cancel_tx(struct uart_port *uport) +{ + int done = 0; + unsigned int irq_clear = M_CMD_DONE_EN; + + done = msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, + M_CMD_DONE_EN, true); + if (!done) { + geni_write_reg_earlycon(M_GENI_CMD_ABORT, uport->membase, + SE_GENI_M_CMD_CTRL_REG); + irq_clear |= M_CMD_ABORT_EN; + msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, + M_CMD_ABORT_EN, true); + } + geni_write_reg_earlycon(irq_clear, uport->membase, + SE_GENI_M_IRQ_CLEAR); +} + +static void msm_geni_serial_setup_tx(struct uart_port *uport, + unsigned int xmit_size) +{ + u32 m_cmd = 0; + + geni_write_reg_earlycon(xmit_size, uport->membase, + SE_UART_TX_TRANS_LEN); + m_cmd |= (UART_START_TX << M_OPCODE_SHFT); + geni_write_reg_earlycon(m_cmd, uport->membase, SE_GENI_M_CMD0); + /* + * Writes to enable the primary sequencer should go through before + * exiting this function. + */ + mb(); +} + + +static void +__msm_geni_serial_console_write(struct uart_port *uport, const char *s, + unsigned int count) +{ + int new_line = 0; + int i; + int bytes_to_send = count; + int fifo_depth = DEF_FIFO_DEPTH_WORDS; + int tx_wm = DEF_TX_WM; + + for (i = 0; i < count; i++) { + if (s[i] == '\n') + new_line++; + } + + bytes_to_send += new_line; + geni_write_reg_earlycon(tx_wm, uport->membase, + SE_GENI_TX_WATERMARK_REG); + msm_geni_serial_setup_tx(uport, bytes_to_send); + i = 0; + while (i < count) { + u32 chars_to_write = 0; + u32 avail_fifo_bytes = (fifo_depth - tx_wm); + /* + * If the WM bit never set, then the Tx state machine is not + * in a valid state, so break, cancel/abort any existing + * command. Unfortunately the current data being written is + * lost. 
+ */ + while (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, + M_TX_FIFO_WATERMARK_EN, true)) + break; + chars_to_write = min((unsigned int)(count - i), + avail_fifo_bytes); + if ((chars_to_write << 1) > avail_fifo_bytes) + chars_to_write = (avail_fifo_bytes >> 1); + uart_console_write(uport, (s + i), chars_to_write, + msm_geni_serial_wr_char); + geni_write_reg_earlycon(M_TX_FIFO_WATERMARK_EN, + uport->membase, SE_GENI_M_IRQ_CLEAR); + /* Ensure this goes through before polling for WM IRQ again.*/ + mb(); + i += chars_to_write; + } + msm_geni_serial_poll_cancel_tx(uport); +} + + +static void +msm_geni_serial_early_console_write(struct console *con, const char *s, + unsigned int n) +{ + struct earlycon_device *dev = con->data; + + __msm_geni_serial_console_write(&dev->port, s, n); +} + +static int __init +msm_geni_serial_earlycon_setup(struct earlycon_device *dev, + const char *opt) +{ + struct uart_port *uport = &dev->port; + int ret = 0; + u32 tx_trans_cfg = 0; + u32 tx_parity_cfg = 0; + u32 rx_trans_cfg = 0; + u32 rx_parity_cfg = 0; + u32 stop_bit = 0; + u32 rx_stale = 0; + u32 bits_per_char = 0; + u32 s_clk_cfg = 0; + u32 baud = 115200; + int clk_div; + unsigned long clk_rate; + unsigned long cfg0, cfg1; + + if (!uport->membase) { + ret = -ENOMEM; + goto exit_geni_serial_earlyconsetup; + } + + if (get_se_proto_earlycon(uport->membase) != UART) { + ret = -ENXIO; + goto exit_geni_serial_earlyconsetup; + } + + /* + * Ignore Flow control. + * Disable Tx Parity. + * Don't check Parity during Rx. + * Disable Rx Parity. + * n = 8. + * Stop bit = 0. + * Stale timeout in bit-time (3 chars worth). + */ + tx_trans_cfg |= UART_CTS_MASK; + tx_parity_cfg = 0; + rx_trans_cfg = 0; + rx_parity_cfg = 0; + bits_per_char = 0x8; + stop_bit = 0; + rx_stale = 0x18; + clk_div = get_clk_div_rate(baud, &clk_rate); + if (clk_div <= 0) { + ret = -EINVAL; + goto exit_geni_serial_earlyconsetup; + } + + if (IS_ENABLED(CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING)) + clk_div *= 2; + + s_clk_cfg |= SER_CLK_EN; + s_clk_cfg |= (clk_div << CLK_DIV_SHFT); + + /* + * Make an unconditional cancel on the main sequencer to reset + * it else we could end up in data loss scenarios. + */ + geni_write_reg_earlycon(0x21, uport->membase, GENI_SER_M_CLK_CFG); + geni_read_reg_earlycon(uport->membase, GENI_SER_M_CLK_CFG); + + msm_geni_serial_poll_cancel_tx(uport); + + se_get_packing_config_earlycon(8, 1, false, &cfg0, &cfg1); + geni_se_init_earlycon(uport->membase, (DEF_FIFO_DEPTH_WORDS >> 1), + (DEF_FIFO_DEPTH_WORDS - 2)); + geni_se_select_fifo_mode_earlycon(uport->membase); + geni_write_reg_earlycon(cfg0, uport->membase, + SE_GENI_TX_PACKING_CFG0); + geni_write_reg_earlycon(cfg1, uport->membase, + SE_GENI_TX_PACKING_CFG1); + geni_write_reg_earlycon(tx_trans_cfg, uport->membase, + SE_UART_TX_TRANS_CFG); + geni_write_reg_earlycon(tx_parity_cfg, uport->membase, + SE_UART_TX_PARITY_CFG); + geni_write_reg_earlycon(bits_per_char, uport->membase, + SE_UART_TX_WORD_LEN); + geni_write_reg_earlycon(stop_bit, uport->membase, + SE_UART_TX_STOP_BIT_LEN); + geni_write_reg_earlycon(s_clk_cfg, uport->membase, + GENI_SER_M_CLK_CFG); + geni_read_reg_earlycon(uport->membase, + GENI_SER_M_CLK_CFG); + + dev->con->write = msm_geni_serial_early_console_write; + dev->con->setup = NULL; + /* + * Ensure that the early console setup completes before + * returning. 
+ */ + mb(); +exit_geni_serial_earlyconsetup: + return ret; +} +OF_EARLYCON_DECLARE(msm_geni_serial, "qcom,msm-geni-console", + msm_geni_serial_earlycon_setup); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 2a5bf4c14fb89c648f2e30334a9e583508f9eb2c..d2e387379abcf7a472f2e8764f4420a7a2ab5744 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2555,6 +2555,7 @@ struct tty_driver *uart_console_device(struct console *co, int *index) *index = co->index; return p->tty_driver; } +EXPORT_SYMBOL_GPL(uart_console_device); static ssize_t uart_get_attr_uartclk(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 9e1a6af23ca2b1257ebcaab83cb80a2c7935ded3..8aaa7900927a83393ab0d5b903eadc276c83c26e 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -873,9 +873,16 @@ static void sci_receive_chars(struct uart_port *port) tty_insert_flip_char(tport, c, TTY_NORMAL); } else { for (i = 0; i < count; i++) { - char c = serial_port_in(port, SCxRDR); - - status = serial_port_in(port, SCxSR); + char c; + + if (port->type == PORT_SCIF || + port->type == PORT_HSCIF) { + status = serial_port_in(port, SCxSR); + c = serial_port_in(port, SCxRDR); + } else { + c = serial_port_in(port, SCxRDR); + status = serial_port_in(port, SCxSR); + } if (uart_handle_sysrq_char(port, c)) { count--; i--; continue; diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index cee0274806c551f9971abc250c226606e8df59c2..8b4a46ae7400e87b1b7d6750a8a512b16a55539c 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -134,17 +134,10 @@ static struct sysrq_key_op sysrq_unraw_op = { static void sysrq_handle_crash(int key) { - char *killer = NULL; - - /* we need to release the RCU read lock here, - * otherwise we get an annoying - * 'BUG: sleeping function called from invalid context' - * complaint from the kernel before the panic. 
- */ + /* release the RCU read lock before crashing */ rcu_read_unlock(); - panic_on_oops = 1; /* force panic */ - wmb(); - *killer = 1; + + panic("sysrq triggered crash\n"); } static struct sysrq_key_op sysrq_crash_op = { .handler = sysrq_handle_crash, diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 36c6f1b98372f2e08c79007f21301129bfde1374..5c7a968a5ea6753bbd5b1eba17cbfd1914ea62cc 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -81,6 +81,7 @@ #include #include #include +#include #include #include #include @@ -350,7 +351,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows) /* allocate everything in one go */ memsize = cols * rows * sizeof(char32_t); memsize += rows * sizeof(char32_t *); - p = kmalloc(memsize, GFP_KERNEL); + p = vmalloc(memsize); if (!p) return NULL; @@ -364,9 +365,14 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows) return uniscr; } +static void vc_uniscr_free(struct uni_screen *uniscr) +{ + vfree(uniscr); +} + static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr) { - kfree(vc->vc_uni_screen); + vc_uniscr_free(vc->vc_uni_screen); vc->vc_uni_screen = new_uniscr; } @@ -1209,7 +1215,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) return 0; - if (new_screen_size > (4 << 20)) + if (new_screen_size > KMALLOC_MAX_SIZE) return -EINVAL; newscreen = kzalloc(new_screen_size, GFP_USER); if (!newscreen) @@ -1232,7 +1238,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, err = resize_screen(vc, new_cols, new_rows, user); if (err) { kfree(newscreen); - kfree(new_uniscr); + vc_uniscr_free(new_uniscr); return err; } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 6e0b41861735ad69401130750426151062211d44..10ba1b4f0dbfb6e0d0b1a12f6985bbd22152cefc 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -412,9 +412,12 @@ static void acm_ctrl_irq(struct urb *urb) exit: retval = usb_submit_urb(urb, GFP_ATOMIC); - if (retval && retval != -EPERM) + if (retval && retval != -EPERM && retval != -ENODEV) dev_err(&acm->control->dev, "%s - usb_submit_urb failed: %d\n", __func__, retval); + else + dev_vdbg(&acm->control->dev, + "control resubmission terminated %d\n", retval); } static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) @@ -430,6 +433,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) dev_err(&acm->data->dev, "urb %d failed submission with %d\n", index, res); + } else { + dev_vdbg(&acm->data->dev, "intended failure %d\n", res); } set_bit(index, &acm->read_urbs_free); return res; @@ -472,6 +477,7 @@ static void acm_read_bulk_callback(struct urb *urb) int status = urb->status; bool stopped = false; bool stalled = false; + bool cooldown = false; dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n", rb->index, urb->actual_length, status); @@ -498,6 +504,14 @@ static void acm_read_bulk_callback(struct urb *urb) __func__, status); stopped = true; break; + case -EOVERFLOW: + case -EPROTO: + dev_dbg(&acm->data->dev, + "%s - cooling babbling device\n", __func__); + usb_mark_last_busy(acm->dev); + set_bit(rb->index, &acm->urbs_in_error_delay); + cooldown = true; + break; default: dev_dbg(&acm->data->dev, "%s - nonzero urb status received: %d\n", @@ -519,9 +533,11 @@ static void acm_read_bulk_callback(struct urb *urb) */ smp_mb__after_atomic(); - if (stopped || stalled) { + if 
(stopped || stalled || cooldown) { if (stalled) schedule_work(&acm->work); + else if (cooldown) + schedule_delayed_work(&acm->dwork, HZ / 2); return; } @@ -563,14 +579,20 @@ static void acm_softint(struct work_struct *work) struct acm *acm = container_of(work, struct acm, work); if (test_bit(EVENT_RX_STALL, &acm->flags)) { - if (!(usb_autopm_get_interface(acm->data))) { + smp_mb(); /* against acm_suspend() */ + if (!acm->susp_count) { for (i = 0; i < acm->rx_buflimit; i++) usb_kill_urb(acm->read_urbs[i]); usb_clear_halt(acm->dev, acm->in); acm_submit_read_urbs(acm, GFP_KERNEL); - usb_autopm_put_interface(acm->data); + clear_bit(EVENT_RX_STALL, &acm->flags); } - clear_bit(EVENT_RX_STALL, &acm->flags); + } + + if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) { + for (i = 0; i < ACM_NR; i++) + if (test_and_clear_bit(i, &acm->urbs_in_error_delay)) + acm_submit_read_urb(acm, i, GFP_NOIO); } if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags)) @@ -1365,6 +1387,7 @@ static int acm_probe(struct usb_interface *intf, acm->readsize = readsize; acm->rx_buflimit = num_rx_buf; INIT_WORK(&acm->work, acm_softint); + INIT_DELAYED_WORK(&acm->dwork, acm_softint); init_waitqueue_head(&acm->wioctl); spin_lock_init(&acm->write_lock); spin_lock_init(&acm->read_lock); @@ -1574,6 +1597,7 @@ static void acm_disconnect(struct usb_interface *intf) acm_kill_urbs(acm); cancel_work_sync(&acm->work); + cancel_delayed_work_sync(&acm->dwork); tty_unregister_device(acm_tty_driver, acm->minor); @@ -1616,6 +1640,8 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message) acm_kill_urbs(acm); cancel_work_sync(&acm->work); + cancel_delayed_work_sync(&acm->dwork); + acm->urbs_in_error_delay = 0; return 0; } diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index 515aad0847ee84b408e7fceb503081f5d97fc78b..30380d28a504b8715417f6f720103ea480ab562a 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -108,8 +108,11 @@ struct acm { unsigned long flags; # define EVENT_TTY_WAKEUP 0 # define EVENT_RX_STALL 1 +# define ACM_ERROR_DELAY 3 + unsigned long urbs_in_error_delay; /* these need to be restarted after a delay */ struct usb_cdc_line_coding line; /* bits, stop, parity */ - struct work_struct work; /* work queue entry for line discipline waking up */ + struct work_struct work; /* work queue entry for various purposes*/ + struct delayed_work dwork; /* for cool downs needed in error recovery */ unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ unsigned int ctrlout; /* output control lines (DTR, RTS) */ struct async_icount iocount; /* counters for control line changes */ diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c index 73c8e65917461f8f83d9233c96bdf0d2b8956b27..9c5c53617d082a193a0bf86beb37d931bf2d518d 100644 --- a/drivers/usb/common/common.c +++ b/drivers/usb/common/common.c @@ -100,6 +100,7 @@ static const char *const usb_dr_modes[] = { [USB_DR_MODE_HOST] = "host", [USB_DR_MODE_PERIPHERAL] = "peripheral", [USB_DR_MODE_OTG] = "otg", + [USB_DR_MODE_DRD] = "drd", }; static enum usb_dr_mode usb_get_dr_mode_from_string(const char *str) diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 2025261e97a1c6f261c9a35b764a5cfa33e87aa6..339df67b52a6898cd84e0543d0adf7bbc819b80d 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -1086,6 +1086,15 @@ int usb_get_bos_descriptor(struct usb_device *dev) case USB_PTM_CAP_TYPE: dev->bos->ptm_cap = (struct usb_ptm_cap_descriptor *)buffer; + 
break; + case USB_CAP_TYPE_CONFIG_SUMMARY: + /* one such desc per function */ + if (!dev->bos->num_config_summary_desc) + dev->bos->config_summary = + (struct usb_config_summary_descriptor *)buffer; + + dev->bos->num_config_summary_desc++; + break; default: break; } diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 3255b2bb0fd56d721858ee5bae0d545be4df7b16..60c2ec4daf75b44a22a42771683ad60aa599fcdd 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -1458,6 +1458,9 @@ int usb_suspend(struct device *dev, pm_message_t msg) struct usb_device *udev = to_usb_device(dev); int r; + if (udev->bus->skip_resume && udev->state == USB_STATE_SUSPENDED) + return 0; + unbind_no_pm_drivers_interfaces(udev); /* From now on we are sure all drivers support suspend/resume @@ -1494,6 +1497,15 @@ int usb_resume(struct device *dev, pm_message_t msg) struct usb_device *udev = to_usb_device(dev); int status; + /* + * Some buses would like to keep their devices in suspend + * state after system resume. Their resume happen when + * a remote wakeup is detected or interface driver start + * I/O. + */ + if (udev->bus->skip_resume) + return 0; + /* For all calls, take the device back to full power and * tell the PM core in case it was autosuspended previously. * Unbind the interfaces that will need rebinding later, diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c index bc8242bc4564b8969071729e144b8529cff89a4c..761073d32eaca20eac0cb935124cb0f915512497 100644 --- a/drivers/usb/core/generic.c +++ b/drivers/usb/core/generic.c @@ -42,6 +42,36 @@ static int is_activesync(struct usb_interface_descriptor *desc) && desc->bInterfaceProtocol == 1; } +static int get_usb_audio_config(struct usb_host_bos *bos) +{ + unsigned int desc_cnt, num_cfg_desc, len = 0; + unsigned char *buffer; + struct usb_config_summary_descriptor *conf_summary; + + if (!bos || !bos->config_summary) + goto done; + + num_cfg_desc = bos->num_config_summary_desc; + conf_summary = bos->config_summary; + buffer = (unsigned char *)conf_summary; + for (desc_cnt = 0; desc_cnt < num_cfg_desc; desc_cnt++) { + conf_summary = + (struct usb_config_summary_descriptor *)(buffer + len); + + len += conf_summary->bLength; + + if (conf_summary->bcdVersion != USB_CONFIG_SUMMARY_DESC_REV || + conf_summary->bClass != USB_CLASS_AUDIO) + continue; + + /* return 1st config as per device preference */ + return conf_summary->bConfigurationIndex[0]; + } + +done: + return -EINVAL; +} + int usb_choose_configuration(struct usb_device *udev) { int i; @@ -145,7 +175,10 @@ int usb_choose_configuration(struct usb_device *udev) insufficient_power, plural(insufficient_power)); if (best) { - i = best->desc.bConfigurationValue; + /* choose device preferred config */ + i = get_usb_audio_config(udev->bos); + if (i < 0) + i = best->desc.bConfigurationValue; dev_dbg(&udev->dev, "configuration #%d chosen from %d choice%s\n", i, num_configs, plural(num_configs)); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 6823a39e699e6602ea44a4e631ec0ea363a9b8dc..ebc4a84a2221cef80cd4283454071be60cffb310 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2257,6 +2257,30 @@ int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev, /*-------------------------------------------------------------------------*/ +phys_addr_t +usb_hcd_get_sec_event_ring_phys_addr(struct usb_device *udev, + unsigned int intr_num, dma_addr_t *dma) +{ + struct usb_hcd *hcd = bus_to_hcd(udev->bus); + + if (!HCD_RH_RUNNING(hcd)) + return 0; + 
+ return hcd->driver->get_sec_event_ring_phys_addr(hcd, intr_num, dma); +} + +phys_addr_t +usb_hcd_get_xfer_ring_phys_addr(struct usb_device *udev, + struct usb_host_endpoint *ep, dma_addr_t *dma) +{ + struct usb_hcd *hcd = bus_to_hcd(udev->bus); + + if (!HCD_RH_RUNNING(hcd)) + return 0; + + return hcd->driver->get_xfer_ring_phys_addr(hcd, udev, ep, dma); +} + int usb_hcd_get_controller_id(struct usb_device *udev) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); @@ -2543,6 +2567,7 @@ void usb_hc_died (struct usb_hcd *hcd) } spin_unlock_irqrestore (&hcd_root_hub_lock, flags); /* Make sure that the other roothub is also deallocated. */ + usb_atomic_notify_dead_bus(&hcd->self); } EXPORT_SYMBOL_GPL (usb_hc_died); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index b4d0915710d66ec6a54d5279d4c45697b3e112c7..43cfc9a3cc4ba0415352a0c30634084cbc8d36a0 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1197,6 +1197,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) #ifdef CONFIG_PM udev->reset_resume = 1; #endif + /* Don't set the change_bits when the device + * was powered off. + */ + if (test_bit(port1, hub->power_bits)) + set_bit(port1, hub->change_bits); } else { /* The power session is gone; tell hub_wq */ @@ -3052,6 +3057,15 @@ static int check_port_resume_type(struct usb_device *udev, if (portchange & USB_PORT_STAT_C_ENABLE) usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_ENABLE); + + /* + * Whatever made this reset-resume necessary may have + * turned on the port1 bit in hub->change_bits. But after + * a successful reset-resume we want the bit to be clear; + * if it was on it would indicate that something happened + * following the reset-resume. + */ + clear_bit(port1, hub->change_bits); } return status; diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 0d3fd208316569bb28095881f2830480592f54e3..fcf84bfc08e34fd1947e230fdc83b6366f815800 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -588,12 +588,13 @@ void usb_sg_cancel(struct usb_sg_request *io) int i, retval; spin_lock_irqsave(&io->lock, flags); - if (io->status) { + if (io->status || io->count == 0) { spin_unlock_irqrestore(&io->lock, flags); return; } /* shut everything down */ io->status = -ECONNRESET; + io->count++; /* Keep the request alive until we're done */ spin_unlock_irqrestore(&io->lock, flags); for (i = io->entries - 1; i >= 0; --i) { @@ -607,6 +608,12 @@ void usb_sg_cancel(struct usb_sg_request *io) dev_warn(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } + + spin_lock_irqsave(&io->lock, flags); + io->count--; + if (!io->count) + complete(&io->complete); + spin_unlock_irqrestore(&io->lock, flags); } EXPORT_SYMBOL_GPL(usb_sg_cancel); diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c index ab474b11523ee8f901a48dcbc18a3434c573e3d5..1dd68096a2fd766e3d94648674805f6b07aeea56 100644 --- a/drivers/usb/core/notify.c +++ b/drivers/usb/core/notify.c @@ -19,6 +19,7 @@ #include "usb.h" static BLOCKING_NOTIFIER_HEAD(usb_notifier_list); +static ATOMIC_NOTIFIER_HEAD(usb_atomic_notifier_list); /** * usb_register_notify - register a notifier callback whenever a usb change happens @@ -69,3 +70,33 @@ void usb_notify_remove_bus(struct usb_bus *ubus) { blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus); } + +/** + * usb_register_atomic_notify - register a atomic notifier callback whenever a + * HC dies + * @nb: pointer to the atomic notifier block for the callback events. 
+ * + */ +void usb_register_atomic_notify(struct notifier_block *nb) +{ + atomic_notifier_chain_register(&usb_atomic_notifier_list, nb); +} +EXPORT_SYMBOL_GPL(usb_register_atomic_notify); + +/** + * usb_unregister_atomic_notify - unregister a atomic notifier callback + * @nb: pointer to the notifier block for the callback events. + * + */ +void usb_unregister_atomic_notify(struct notifier_block *nb) +{ + atomic_notifier_chain_unregister(&usb_atomic_notifier_list, nb); +} +EXPORT_SYMBOL_GPL(usb_unregister_atomic_notify); + + +void usb_atomic_notify_dead_bus(struct usb_bus *ubus) +{ + atomic_notifier_call_chain(&usb_atomic_notifier_list, USB_BUS_DIED, + ubus); +} diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index da30b5664ff3d5da22c5219159545e9faa72fdca..3e8efe759c3e69a8f8c29f5cafe8c0a6b1cca6fc 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -430,6 +430,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* Corsair K70 LUX */ { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Corsair K70 RGB RAPDIFIRE */ + { USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, + /* MIDI keyboard WORLDE MINI */ { USB_DEVICE(0x1c75, 0x0204), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 7e58698f0931c05ad9973ec26c5b4c932116319b..35f632165c8f9f8fd046d28c762ff5cf7382fed0 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -842,6 +842,27 @@ int usb_sec_event_ring_cleanup(struct usb_device *dev, } EXPORT_SYMBOL(usb_sec_event_ring_cleanup); +phys_addr_t +usb_get_sec_event_ring_phys_addr(struct usb_device *dev, + unsigned int intr_num, dma_addr_t *dma) +{ + if (dev->state == USB_STATE_NOTATTACHED) + return 0; + + return usb_hcd_get_sec_event_ring_phys_addr(dev, intr_num, dma); +} +EXPORT_SYMBOL_GPL(usb_get_sec_event_ring_phys_addr); + +phys_addr_t usb_get_xfer_ring_phys_addr(struct usb_device *dev, + struct usb_host_endpoint *ep, dma_addr_t *dma) +{ + if (dev->state == USB_STATE_NOTATTACHED) + return 0; + + return usb_hcd_get_xfer_ring_phys_addr(dev, ep, dma); +} +EXPORT_SYMBOL_GPL(usb_get_xfer_ring_phys_addr); + /** * usb_get_controller_id - returns the host controller id. * @dev: the device whose host controller id is being queried. 
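A minimal consumer sketch for the atomic notifier chain added above (hypothetical client code, not part of this patch). Because the chain is atomic and is fired from usb_hc_died(), the callback must not sleep:

	static int my_bus_died_cb(struct notifier_block *nb, unsigned long event,
				  void *data)
	{
		struct usb_bus *ubus = data;

		/* USB_BUS_DIED is the event passed by usb_atomic_notify_dead_bus() */
		if (event == USB_BUS_DIED)
			pr_err("usb bus %d: host controller died\n", ubus->busnum);

		return NOTIFY_OK;
	}

	static struct notifier_block my_bus_died_nb = {
		.notifier_call = my_bus_died_cb,
	};

	/* usb_register_atomic_notify(&my_bus_died_nb) at probe time,
	 * usb_unregister_atomic_notify(&my_bus_died_nb) on teardown.
	 */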
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index c0df5a468d78ca7f959a892b677fa13b79d0991f..dff5b36f320ded14f4f688e0e03d639ac9a4a20b 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -192,6 +192,7 @@ extern void usb_notify_add_device(struct usb_device *udev); extern void usb_notify_remove_device(struct usb_device *udev); extern void usb_notify_add_bus(struct usb_bus *ubus); extern void usb_notify_remove_bus(struct usb_bus *ubus); +extern void usb_atomic_notify_dead_bus(struct usb_bus *ubus); extern void usb_hub_adjust_deviceremovable(struct usb_device *hdev, struct usb_hub_descriptor *desc); diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 6666d2a52bf56d6294afa7f856897fe7d4cd4eab..60d08269ad9a0245644cc348eeba80868398f6f3 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -981,6 +981,9 @@ static int dwc3_core_init(struct dwc3 *dwc) if (dwc->dis_tx_ipgap_linecheck_quirk) reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS; + if (dwc->parkmode_disable_ss_quirk) + reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS; + dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); } @@ -1287,6 +1290,8 @@ static void dwc3_get_properties(struct dwc3 *dwc) "snps,dis-del-phy-power-chg-quirk"); dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev, "snps,dis-tx-ipgap-linecheck-quirk"); + dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev, + "snps,parkmode-disable-ss-quirk"); dwc->tx_de_emphasis_quirk = device_property_read_bool(dev, "snps,tx_de_emphasis_quirk"); diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 131028501752b46ddc7cc32da35901e2d58e2edc..d6968b90ee6bb3e47c86b2cd45e0d05360abf71f 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -242,6 +242,7 @@ #define DWC3_GUCTL_HSTINAUTORETRY BIT(14) /* Global User Control 1 Register */ +#define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17) #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28) #define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24) @@ -299,6 +300,10 @@ #define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff) #define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000) +/* Global RX Fifo Size Register */ +#define DWC31_GRXFIFOSIZ_RXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */ +#define DWC3_GRXFIFOSIZ_RXFDEP(n) ((n) & 0xffff) + /* Global Event Size Registers */ #define DWC3_GEVNTSIZ_INTMASK BIT(31) #define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff) @@ -992,6 +997,8 @@ struct dwc3_scratchpad_array { * change quirk. * @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate * check during HS transmit. + * @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed + * instances in park mode. 
* @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk * @tx_de_emphasis: Tx de-emphasis value * 0 - -6dB de-emphasis @@ -1163,6 +1170,7 @@ struct dwc3 { unsigned dis_u2_freeclk_exists_quirk:1; unsigned dis_del_phy_power_chg_quirk:1; unsigned dis_tx_ipgap_linecheck_quirk:1; + unsigned parkmode_disable_ss_quirk:1; unsigned tx_de_emphasis_quirk:1; unsigned tx_de_emphasis:2; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index d482f89ffae2d58eac6a02b09831061f30e05e27..99f6a5aa010954541e60ac170811debf7cb2eeae 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -688,12 +688,13 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action) return 0; } -static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force); +static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, + bool interrupt); static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) { struct dwc3_request *req; - dwc3_stop_active_transfer(dep, true); + dwc3_stop_active_transfer(dep, true, false); /* - giveback all requests to gadget driver */ while (!list_empty(&dep->started_list)) { @@ -1369,7 +1370,7 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r for (i = 0; i < req->num_trbs; i++) { struct dwc3_trb *trb; - trb = req->trb + i; + trb = &dep->trb_pool[dep->trb_dequeue]; trb->ctrl &= ~DWC3_TRB_CTRL_HWO; dwc3_ep_inc_deq(dep); } @@ -1416,7 +1417,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, } if (r == req) { /* wait until it is processed */ - dwc3_stop_active_transfer(dep, true); + dwc3_stop_active_transfer(dep, true, true); if (!r->trb) goto out0; @@ -1576,7 +1577,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc) u32 reg; u8 link_state; - u8 speed; /* * According to the Databook Remote wakeup request should @@ -1586,16 +1586,13 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc) */ reg = dwc3_readl(dwc->regs, DWC3_DSTS); - speed = reg & DWC3_DSTS_CONNECTSPD; - if ((speed == DWC3_DSTS_SUPERSPEED) || - (speed == DWC3_DSTS_SUPERSPEED_PLUS)) - return 0; - link_state = DWC3_DSTS_USBLNKST(reg); switch (link_state) { + case DWC3_LINK_STATE_RESET: case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ + case DWC3_LINK_STATE_RESUME: break; default: return -EINVAL; @@ -2035,7 +2032,6 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) { struct dwc3 *dwc = dep->dwc; int mdwidth; - int kbytes; int size; mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); @@ -2051,17 +2047,17 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) /* FIFO Depth is in MDWDITH bytes. Multiply */ size *= mdwidth; - kbytes = size / 1024; - if (kbytes == 0) - kbytes = 1; - /* - * FIFO sizes account an extra MDWIDTH * (kbytes + 1) bytes for - * internal overhead. We don't really know how these are used, - * but documentation say it exists. + * To meet performance requirement, a minimum TxFIFO size of 3x + * MaxPacketSize is recommended for endpoints that support burst and a + * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't + * support burst. Use those numbers and we can calculate the max packet + * limit as below. 
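+ *
+ * For example (illustrative numbers only): with mdwidth = 8 bytes and
+ * TXFDEF = 384, size = 384 * 8 = 3072 bytes, so a controller whose
+ * maximum_speed is SuperSpeed or better ends up with a maxpacket limit
+ * of 3072 / 3 = 1024 bytes, while a high-speed-only configuration gets
+ * 3072 / 2 = 1536 bytes.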
*/ - size -= mdwidth * (kbytes + 1); - size /= kbytes; + if (dwc->maximum_speed >= USB_SPEED_SUPER) + size /= 3; + else + size /= 2; usb_ep_set_maxpacket_limit(&dep->endpoint, size); @@ -2079,8 +2075,39 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep) { struct dwc3 *dwc = dep->dwc; + int mdwidth; + int size; + + mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); - usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); + /* MDWIDTH is represented in bits, convert to bytes */ + mdwidth /= 8; + + /* All OUT endpoints share a single RxFIFO space */ + size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0)); + if (dwc3_is_usb31(dwc)) + size = DWC31_GRXFIFOSIZ_RXFDEP(size); + else + size = DWC3_GRXFIFOSIZ_RXFDEP(size); + + /* FIFO depth is in MDWDITH bytes */ + size *= mdwidth; + + /* + * To meet performance requirement, a minimum recommended RxFIFO size + * is defined as follow: + * RxFIFO size >= (3 x MaxPacketSize) + + * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin) + * + * Then calculate the max packet limit as below. + */ + size -= (3 * 8) + 16; + if (size < 0) + size = 0; + else + size /= 3; + + usb_ep_set_maxpacket_limit(&dep->endpoint, size); dep->endpoint.max_streams = 15; dep->endpoint.ops = &dwc3_gadget_ep_ops; list_add_tail(&dep->endpoint.ep_list, @@ -2279,14 +2306,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep, static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req) { - /* - * For OUT direction, host may send less than the setup - * length. Return true for all OUT requests. - */ - if (!req->direction) - return true; - - return req->request.actual == req->request.length; + return req->num_pending_sgs == 0; } static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, @@ -2310,8 +2330,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, req->request.actual = req->request.length - req->remaining; - if (!dwc3_gadget_ep_request_completed(req) || - req->num_pending_sgs) { + if (!dwc3_gadget_ep_request_completed(req)) { __dwc3_gadget_kick_transfer(dep); goto out; } @@ -2365,10 +2384,8 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep, dwc3_gadget_ep_cleanup_completed_requests(dep, event, status); - if (stop) { - dwc3_stop_active_transfer(dep, true); - dep->flags = DWC3_EP_ENABLED; - } + if (stop) + dwc3_stop_active_transfer(dep, true, true); /* * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround. @@ -2488,7 +2505,8 @@ static void dwc3_reset_gadget(struct dwc3 *dwc) } } -static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force) +static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, + bool interrupt) { struct dwc3 *dwc = dep->dwc; struct dwc3_gadget_ep_cmd_params params; @@ -2532,7 +2550,7 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force) cmd = DWC3_DEPCMD_ENDTRANSFER; cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0; - cmd |= DWC3_DEPCMD_CMDIOC; + cmd |= interrupt ? 
DWC3_DEPCMD_CMDIOC : 0; cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); memset(¶ms, 0, sizeof(params)); ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); @@ -3166,7 +3184,6 @@ int dwc3_gadget_init(struct dwc3 *dwc) dwc->gadget.speed = USB_SPEED_UNKNOWN; dwc->gadget.sg_supported = true; dwc->gadget.name = "dwc3-gadget"; - dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG; /* * FIXME We might be setting max_speed to = 0 && cdev->delayed_status) result = USB_GADGET_DELAYED_STATUS; @@ -2381,6 +2386,7 @@ void composite_suspend(struct usb_gadget *gadget) cdev->suspended = 1; + usb_gadget_set_selfpowered(gadget); usb_gadget_vbus_draw(gadget, 2); } @@ -2409,6 +2415,9 @@ void composite_resume(struct usb_gadget *gadget) else maxpower = min(maxpower, 900U); + if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW) + usb_gadget_clear_selfpowered(gadget); + usb_gadget_vbus_draw(gadget, maxpower); } diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index a9239455eb6d855ae0b1561ba034f39d256d6860..11a501d0664cc446c413dcd37b6654d6e023d2ba 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1036,6 +1036,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); if (unlikely(ret)) { + io_data->req = NULL; usb_ep_free_request(ep->ep, req); goto error_lock; } @@ -1736,6 +1737,10 @@ static void ffs_data_reset(struct ffs_data *ffs) ffs->state = FFS_READ_DESCRIPTORS; ffs->setup_state = FFS_NO_SETUP; ffs->flags = 0; + + ffs->ms_os_descs_ext_prop_count = 0; + ffs->ms_os_descs_ext_prop_name_len = 0; + ffs->ms_os_descs_ext_prop_data_len = 0; } diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index a4d9b5e1e50ea09747edc4b1bb0a4fadf7b440d8..d49c6dc1082dc980b47620a63fb28e35f5780c67 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -540,7 +540,7 @@ static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req, { struct bdc *bdc = ep->bdc; - if (req == NULL || &req->queue == NULL || &req->usb_req == NULL) + if (req == NULL) return; dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status); diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 9061d5d07b36ad7582696f0f57e513bcfeee319d..9f8a008081a6a7816c1d5599984c24cad9362235 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -506,6 +506,22 @@ int usb_gadget_wakeup(struct usb_gadget *gadget) } EXPORT_SYMBOL_GPL(usb_gadget_wakeup); +/** + * usb_gsi_ep_op - performs operation on GSI accelerated EP based on EP op code + * + * Operations such as EP configuration, TRB allocation, StartXfer etc. + * See gsi_ep_op for more details. 
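+ *
+ * @ep: endpoint on which the GSI operation is performed
+ * @req: the usb_gsi_request associated with the operation
+ * @op: gsi_ep_op code selecting which operation to run
+ *
+ * Returns the result of the endpoint's gsi_ep_op callback, or -EOPNOTSUPP
+ * when the endpoint does not provide one.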
+ */ +int usb_gsi_ep_op(struct usb_ep *ep, + struct usb_gsi_request *req, enum gsi_ep_op op) +{ + if (ep && ep->ops && ep->ops->gsi_ep_op) + return ep->ops->gsi_ep_op(ep, req, op); + + return -EOPNOTSUPP; +} +EXPORT_SYMBOL_GPL(usb_gsi_ep_op); + /** * usb_gadget_func_wakeup - send a function remote wakeup up notification * to the host connected to this gadget diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index a024230f00e2df4ce9f08bb817eb3eb106b31a14..a58ef53e4ae1dcb1cb077e0f7262a109dfa97118 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1266,7 +1266,16 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, xhci_set_link_state(xhci, ports[wIndex], link_state); spin_unlock_irqrestore(&xhci->lock, flags); - msleep(20); /* wait device to enter */ + if (link_state == USB_SS_PORT_LS_U3) { + int retries = 16; + + while (retries--) { + usleep_range(4000, 8000); + temp = readl(ports[wIndex]->addr); + if ((temp & PORT_PLS_MASK) == XDEV_U3) + break; + } + } spin_lock_irqsave(&xhci->lock, flags); temp = readl(ports[wIndex]->addr); @@ -1472,6 +1481,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) } if ((temp & PORT_RC)) reset_change = true; + if (temp & PORT_OC) + status = 1; } if (!status && !reset_change) { xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); @@ -1537,6 +1548,13 @@ int xhci_bus_suspend(struct usb_hcd *hcd) port_index); goto retry; } + /* bail out if port detected a over-current condition */ + if (t1 & PORT_OC) { + bus_state->bus_suspended = 0; + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "Bus suspend bailout, port over-current detected\n"); + return -EBUSY; + } /* suspend ports in U0, or bail out for new connect changes */ if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { if ((t1 & PORT_CSC) && wake_enabled) { diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index ce23d2dc971425c316f5a45d17328413f7a76fa3..a55e8b19fc62ea37cdf3d85921629153f0ce224b 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1147,8 +1147,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) xhci_dbg(xhci, "Stop HCD\n"); xhci_halt(xhci); xhci_zero_64b_regs(xhci); - xhci_reset(xhci); + retval = xhci_reset(xhci); spin_unlock_irq(&xhci->lock); + if (retval) + return retval; xhci_cleanup_msix(xhci); xhci_dbg(xhci, "// Disabling event ring interrupts\n"); @@ -5183,6 +5185,79 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) } EXPORT_SYMBOL_GPL(xhci_gen_setup); +static phys_addr_t xhci_get_sec_event_ring_phys_addr(struct usb_hcd *hcd, + unsigned int intr_num, dma_addr_t *dma) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct device *dev = hcd->self.sysdev; + struct sg_table sgt; + phys_addr_t pa; + + if (intr_num > xhci->max_interrupters) { + xhci_err(xhci, "intr num %d > max intrs %d\n", intr_num, + xhci->max_interrupters); + return 0; + } + + if (!(xhci->xhc_state & XHCI_STATE_HALTED) && + xhci->sec_event_ring && xhci->sec_event_ring[intr_num] + && xhci->sec_event_ring[intr_num]->first_seg) { + + dma_get_sgtable(dev, &sgt, + xhci->sec_event_ring[intr_num]->first_seg->trbs, + xhci->sec_event_ring[intr_num]->first_seg->dma, + TRB_SEGMENT_SIZE); + + *dma = xhci->sec_event_ring[intr_num]->first_seg->dma; + + pa = page_to_phys(sg_page(sgt.sgl)); + sg_free_table(&sgt); + + return pa; + } + + return 0; +} + +static phys_addr_t xhci_get_xfer_ring_phys_addr(struct usb_hcd *hcd, + struct usb_device *udev, struct usb_host_endpoint *ep, dma_addr_t 
*dma) +{ + int ret; + unsigned int ep_index; + struct xhci_virt_device *virt_dev; + struct device *dev = hcd->self.sysdev; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct sg_table sgt; + phys_addr_t pa; + + ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); + if (ret <= 0) { + xhci_err(xhci, "%s: invalid args\n", __func__); + return 0; + } + + virt_dev = xhci->devs[udev->slot_id]; + ep_index = xhci_get_endpoint_index(&ep->desc); + + if (virt_dev->eps[ep_index].ring && + virt_dev->eps[ep_index].ring->first_seg) { + + dma_get_sgtable(dev, &sgt, + virt_dev->eps[ep_index].ring->first_seg->trbs, + virt_dev->eps[ep_index].ring->first_seg->dma, + TRB_SEGMENT_SIZE); + + *dma = virt_dev->eps[ep_index].ring->first_seg->dma; + + pa = page_to_phys(sg_page(sgt.sgl)); + sg_free_table(&sgt); + + return pa; + } + + return 0; +} + static int xhci_stop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep) { @@ -5302,6 +5377,8 @@ static const struct hc_driver xhci_hc_driver = { .find_raw_port_number = xhci_find_raw_port_number, .sec_event_ring_setup = xhci_sec_event_ring_setup, .sec_event_ring_cleanup = xhci_sec_event_ring_cleanup, + .get_sec_event_ring_phys_addr = xhci_get_sec_event_ring_phys_addr, + .get_xfer_ring_phys_addr = xhci_get_xfer_ring_phys_addr, .stop_endpoint = xhci_stop_endpoint, }; diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index c4f6ac5f035eba70005ae0e470b6c67d9c18df86..6376be1f5fd22585821bd9f8cc8a227518ed1f5e 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -1199,18 +1199,18 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr, /* High level: Gfx (indexed) register access */ #ifdef INCL_SISUSB_CON -int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data) +int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data) { return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, data); } -int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 *data) +int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 *data) { return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port, data); } #endif -int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, +int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 data) { int ret; @@ -1220,7 +1220,7 @@ int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, return ret; } -int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, +int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 *data) { int ret; @@ -1230,7 +1230,7 @@ int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, return ret; } -int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx, +int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand, u8 myor) { int ret; @@ -1245,7 +1245,7 @@ int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx, } static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb, - int port, u8 idx, u8 data, u8 mask) + u32 port, u8 idx, u8 data, u8 mask) { int ret; u8 tmp; @@ -1258,13 +1258,13 @@ static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb, return ret; } -int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port, +int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 myor) { return sisusb_setidxregandor(sisusb, port, index, 0xff, myor); } -int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port, 
+int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand) { return sisusb_setidxregandor(sisusb, port, idx, myand, 0x00); @@ -2787,8 +2787,8 @@ static loff_t sisusb_lseek(struct file *file, loff_t offset, int orig) static int sisusb_handle_command(struct sisusb_usb_data *sisusb, struct sisusb_command *y, unsigned long arg) { - int retval, port, length; - u32 address; + int retval, length; + u32 port, address; /* All our commands require the device * to be initialized. diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h index 1782c759c4ad582fbf9fb6e6fc7f6ce78bf7739a..ace09985dae4c53a81efe333a1e2b93204da150c 100644 --- a/drivers/usb/misc/sisusbvga/sisusb_init.h +++ b/drivers/usb/misc/sisusbvga/sisusb_init.h @@ -812,17 +812,17 @@ static const struct SiS_VCLKData SiSUSB_VCLKData[] = { int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo); int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo); -extern int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data); -extern int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 * data); -extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data); +extern int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 * data); +extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 data); -extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 * data); -extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand, u8 myor); -extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port, u8 index, u8 myor); -extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port, +extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port, u8 idx, u8 myand); void sisusb_delete(struct kref *kref); diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 633550ec3025536058633aca680251d1c604e957..f29c3a936a08115b596f9a8134e7c527dcd358e3 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -1138,8 +1138,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p, send it directly to the tty port */ if (garmin_data_p->flags & FLAGS_QUEUING) { pkt_add(garmin_data_p, data, data_length); - } else if (bulk_data || - getLayerId(data) == GARMIN_LAYERID_APPL) { + } else if (bulk_data || (data_length >= sizeof(u32) && + getLayerId(data) == GARMIN_LAYERID_APPL)) { spin_lock_irqsave(&garmin_data_p->lock, flags); garmin_data_p->flags |= APP_RESP_SEEN; diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 613f91add03da189c0fd5334cbccbbf720060079..ce0401d3137f1341929b3c3027175ac11938ca2f 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -173,6 +173,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ + {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */ {DEVICE_SWI(0x413c, 0x81cf)}, /* 
Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */ diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 62ca8e29da48fa0a51abda388211afb156616a24..27d8b4b6ff593c119162a1e4ed06b3161199b693 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -81,6 +81,19 @@ static void uas_free_streams(struct uas_dev_info *devinfo); static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix, int status); +/* + * This driver needs its own workqueue, as we need to control memory allocation. + * + * In the course of error handling and power management uas_wait_for_pending_cmnds() + * needs to flush pending work items. In these contexts we cannot allocate memory + * by doing block IO as we would deadlock. For the same reason we cannot wait + * for anything allocating memory not heeding these constraints. + * + * So we have to control all work items that can be on the workqueue we flush. + * Hence we cannot share a queue and need our own. + */ +static struct workqueue_struct *workqueue; + static void uas_do_work(struct work_struct *work) { struct uas_dev_info *devinfo = @@ -109,7 +122,7 @@ static void uas_do_work(struct work_struct *work) if (!err) cmdinfo->state &= ~IS_IN_WORK_LIST; else - schedule_work(&devinfo->work); + queue_work(workqueue, &devinfo->work); } out: spin_unlock_irqrestore(&devinfo->lock, flags); @@ -134,7 +147,7 @@ static void uas_add_work(struct uas_cmd_info *cmdinfo) lockdep_assert_held(&devinfo->lock); cmdinfo->state |= IS_IN_WORK_LIST; - schedule_work(&devinfo->work); + queue_work(workqueue, &devinfo->work); } static void uas_zap_pending(struct uas_dev_info *devinfo, int result) @@ -190,6 +203,9 @@ static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix, struct uas_cmd_info *ci = (void *)&cmnd->SCp; struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp; + if (status == -ENODEV) /* too late */ + return; + scmd_printk(KERN_INFO, cmnd, "%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ", prefix, status, cmdinfo->uas_tag, @@ -1233,7 +1249,31 @@ static struct usb_driver uas_driver = { .id_table = uas_usb_ids, }; -module_usb_driver(uas_driver); +static int __init uas_init(void) +{ + int rv; + + workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM, 0); + if (!workqueue) + return -ENOMEM; + + rv = usb_register(&uas_driver); + if (rv) { + destroy_workqueue(workqueue); + return -ENOMEM; + } + + return 0; +} + +static void __exit uas_exit(void) +{ + usb_deregister(&uas_driver); + destroy_workqueue(workqueue); +} + +module_init(uas_init); +module_exit(uas_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR( diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 1880f3e13f57633d7c401998d33f859e0423cc31..f6c3681fa2e9efa4d861199aa2fe12a1c8334652 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2323,6 +2323,13 @@ UNUSUAL_DEV( 0x3340, 0xffff, 0x0000, 0x0000, USB_SC_DEVICE,USB_PR_DEVICE,NULL, US_FL_MAX_SECTORS_64 ), +/* Reported by Cyril Roelandt */ +UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA ), + /* Reported by Andrey Rahmatullin */ UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, "iRiver", diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 1b23741036ee8e913f6a8e508e754a94d78fb19e..37157ed9a881a358968d85803428f95786628fc5 100644 --- 
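/*
 * Illustrative sketch, not part of the patch: the dedicated WQ_MEM_RECLAIM
 * workqueue pattern that the uas comment above describes, shown for a
 * hypothetical driver "foo". Only the workqueue and USB registration calls
 * are real APIs; every "foo" identifier is an assumption for illustration.
 */
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;

static void foo_work_fn(struct work_struct *work)
{
	/* deferred completion work; details omitted in this sketch */
}
static DECLARE_WORK(foo_work, foo_work_fn);

static struct usb_driver foo_driver;	/* hypothetical; probe/disconnect omitted */

/* Queue onto the private queue so error handling may flush_workqueue(foo_wq). */
static void foo_defer(void)
{
	queue_work(foo_wq, &foo_work);
}

static int __init foo_init(void)
{
	int rv;

	/* WQ_MEM_RECLAIM guarantees forward progress under memory pressure */
	foo_wq = alloc_workqueue("foo", WQ_MEM_RECLAIM, 0);
	if (!foo_wq)
		return -ENOMEM;

	rv = usb_register(&foo_driver);
	if (rv)
		destroy_workqueue(foo_wq);
	return rv;
}

static void __exit foo_exit(void)
{
	usb_deregister(&foo_driver);
	destroy_workqueue(foo_wq);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");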
a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -28,6 +28,13 @@ * and don't forget to CC: the USB development list */ +/* Reported-by: Julian Groß */ +UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, + "LaCie", + "2Big Quadra USB3", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), + /* * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI * commands in UAS mode. Observed with the 1.28 firmware; are there others? diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index e93ce1cf0bef1c7a3e2004824495f30afa58e977..6c50de8fd10f219cd6828d63d40ff14fa2dbe049 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -387,8 +387,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, vma = find_vma_intersection(mm, vaddr, vaddr + 1); if (vma && vma->vm_flags & VM_PFNMAP) { - *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - if (is_invalid_reserved_pfn(*pfn)) + if (!follow_pfn(vma, vaddr, pfn) && + is_invalid_reserved_pfn(*pfn)) ret = 0; } @@ -600,7 +600,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data, continue; } - remote_vaddr = dma->vaddr + iova - dma->iova; + remote_vaddr = dma->vaddr + (iova - dma->iova); ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i], do_accounting); if (ret) diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 5f5c5de31f104a321496cdb3141e1e84fec608ea..bac1365cc81b5778352dd342147a8f5b5a6494e5 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -499,6 +499,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) mutex_unlock(&vq->mutex); } + /* Some packets may have been queued before the device was started, + * let's kick the send worker to send them. + */ + vhost_work_queue(&vsock->dev, &vsock->send_pkt_work); + mutex_unlock(&vsock->dev.mutex); return 0; diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index b96d4e779333bf13f9a5a03c6d2baa3701a4c49e..cb93a6b3816092860f8ebb9173cb0963a3910dc9 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1243,6 +1243,9 @@ static void fbcon_deinit(struct vc_data *vc) if (!con_is_bound(&fb_con)) fbcon_exit(); + if (vc->vc_num == logo_shown) + logo_shown = FBCON_LOGO_CANSHOW; + return; } diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index c48f083d522a2485268c8eb80c80a92da214eda3..84845275dbef02a3ec8e6e52d587f2a378790153 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1122,7 +1122,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, case FBIOGET_FSCREENINFO: if (!lock_fb_info(info)) return -ENODEV; - fix = info->fix; + memcpy(&fix, &info->fix, sizeof(fix)); unlock_fb_info(info); ret = copy_to_user(argp, &fix, sizeof(fix)) ? 
-EFAULT : 0; diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c index 27a2b72e50e84b45e5ba1b292daf4bd386442143..a8fb41f1a2580e02574f9a40cbc5d1919ed34fdb 100644 --- a/drivers/video/fbdev/sis/init301.c +++ b/drivers/video/fbdev/sis/init301.c @@ -848,9 +848,7 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) SiS_DDC2Delay(SiS_Pr, 0x4000); } - } else if((SiS_Pr->SiS_IF_DEF_LVDS == 1) /* || - (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) || - (SiS_Pr->SiS_CustomT == CUT_CLEVO1400) */ ) { /* 315 series, LVDS; Special */ + } else if (SiS_Pr->SiS_IF_DEF_LVDS == 1) { /* 315 series, LVDS; Special */ if(SiS_Pr->SiS_IF_DEF_CH70xx == 0) { PanelID = SiS_GetReg(SiS_Pr->SiS_P3d4,0x36); diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index 072986d461b765df24c06a8ab09d3f6e2857c48d..d8876fba686dcf8b9847f061a5273e88eb2d199b 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c @@ -137,10 +137,14 @@ wdt_restart(struct watchdog_device *wdd, unsigned long mode, void *cmd) { struct sp805_wdt *wdt = watchdog_get_drvdata(wdd); + writel_relaxed(UNLOCK, wdt->base + WDTLOCK); writel_relaxed(0, wdt->base + WDTCONTROL); writel_relaxed(0, wdt->base + WDTLOAD); writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL); + /* Flush posted writes. */ + readl_relaxed(wdt->base + WDTLOCK); + return 0; } diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index e64aa88e99dabaf5fff7ebf0d1a07e0b1e897584..10b2090f3e5e751eb7392fc39dc2e8c1eb82fa7b 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -264,6 +264,7 @@ static int watchdog_start(struct watchdog_device *wdd) if (err == 0) { set_bit(WDOG_ACTIVE, &wdd->status); wd_data->last_keepalive = started_at; + wd_data->last_hw_keepalive = started_at; watchdog_update_worker(wdd); } diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index a1c17000129ba1cb4465df0bc679a0756f8937a3..e94a61eaeceb08a0172d50a3b8070f0c72ce9705 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -450,7 +450,14 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn); int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs, unsigned int nr_grefs, void **vaddr) { - return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr); + int err; + + err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr); + /* Some hypervisors are buggy and can return 1. 
*/ + if (err > 0) + err = GNTST_general_error; + + return err; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); diff --git a/fs/attr.c b/fs/attr.c index bea2f7cfd52c7e31447b50f08996ce72fc427a62..21071279e216d5d52ff5e712cbf083b64e4f89ef 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -345,7 +345,7 @@ int notify_change2(struct vfsmount *mnt, struct dentry * dentry, struct iattr * return error; } -EXPORT_SYMBOL(notify_change2); +EXPORT_SYMBOL_GPL(notify_change2); int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **delegated_inode) { diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 02e4e903dfe9f1e36274a7724ff435ec873c1d69..f79c0cb7697ac3abf0665ddfb7fa18bbab726dd3 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -434,3 +434,11 @@ void btrfs_set_work_high_priority(struct btrfs_work *work) { set_bit(WORK_HIGH_PRIO_BIT, &work->flags); } + +void btrfs_flush_workqueue(struct btrfs_workqueue *wq) +{ + if (wq->high) + flush_workqueue(wq->high->normal_wq); + + flush_workqueue(wq->normal->normal_wq); +} diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h index 7861c9feba5f4f0ceac82946de06ada2f24f19eb..e87be7c8be585f4821b9808a08f08cd16a6a0de1 100644 --- a/fs/btrfs/async-thread.h +++ b/fs/btrfs/async-thread.h @@ -73,5 +73,6 @@ void btrfs_set_work_high_priority(struct btrfs_work *work); struct btrfs_fs_info *btrfs_work_owner(const struct btrfs_work *work); struct btrfs_fs_info *btrfs_workqueue_owner(const struct __btrfs_workqueue *wq); bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq); +void btrfs_flush_workqueue(struct btrfs_workqueue *wq); #endif diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index e9522f2f25ccbc41370a6dba7ecac68a3b447a23..7374fb23381ca1096a1c29c48961a29a0057fe6b 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -6,6 +6,7 @@ #include #include +#include #include "delayed-inode.h" #include "disk-io.h" #include "transaction.h" @@ -801,11 +802,14 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans, struct btrfs_delayed_item *delayed_item) { struct extent_buffer *leaf; + unsigned int nofs_flag; char *ptr; int ret; + nofs_flag = memalloc_nofs_save(); ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key, delayed_item->data_len); + memalloc_nofs_restore(nofs_flag); if (ret < 0 && ret != -EEXIST) return ret; @@ -933,6 +937,7 @@ static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans, struct btrfs_delayed_node *node) { struct btrfs_delayed_item *curr, *prev; + unsigned int nofs_flag; int ret = 0; do_again: @@ -941,7 +946,9 @@ static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans, if (!curr) goto delete_fail; + nofs_flag = memalloc_nofs_save(); ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1); + memalloc_nofs_restore(nofs_flag); if (ret < 0) goto delete_fail; else if (ret > 0) { @@ -1008,6 +1015,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, struct btrfs_key key; struct btrfs_inode_item *inode_item; struct extent_buffer *leaf; + unsigned int nofs_flag; int mod; int ret; @@ -1020,7 +1028,9 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, else mod = 1; + nofs_flag = memalloc_nofs_save(); ret = btrfs_lookup_inode(trans, root, path, &key, mod); + memalloc_nofs_restore(nofs_flag); if (ret > 0) { btrfs_release_path(path); return -ENOENT; @@ -1071,7 +1081,10 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, 
key.type = BTRFS_INODE_EXTREF_KEY; key.offset = -1; + + nofs_flag = memalloc_nofs_save(); ret = btrfs_search_slot(trans, root, &key, path, -1, 1); + memalloc_nofs_restore(nofs_flag); if (ret < 0) goto err_out; ASSERT(ret); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b5039b16de93a36ac59c1b6eddc072cb3bb21388..da7a2a5306474b094605968512522c8a0c6cbffb 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3007,6 +3007,18 @@ int open_ctree(struct super_block *sb, fs_info->generation = generation; fs_info->last_trans_committed = generation; + /* + * If we have a uuid root and we're not being told to rescan we need to + * check the generation here so we can set the + * BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the + * transaction during a balance or the log replay without updating the + * uuid generation, and then if we crash we would rescan the uuid tree, + * even though it was perfectly fine. + */ + if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) && + fs_info->generation == btrfs_super_uuid_tree_generation(disk_super)) + set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); + ret = btrfs_verify_dev_extents(fs_info); if (ret) { btrfs_err(fs_info, @@ -3237,8 +3249,6 @@ int open_ctree(struct super_block *sb, close_ctree(fs_info); return ret; } - } else { - set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); } set_bit(BTRFS_FS_OPEN, &fs_info->flags); @@ -3949,6 +3959,19 @@ void close_ctree(struct btrfs_fs_info *fs_info) */ btrfs_delete_unused_bgs(fs_info); + /* + * There might be existing delayed inode workers still running + * and holding an empty delayed inode item. We must wait for + * them to complete first because they can create a transaction. + * This happens when someone calls btrfs_balance_delayed_items() + * and then a transaction commit runs the same delayed nodes + * before any delayed worker has done something with the nodes. + * We must wait for any worker here and not at transaction + * commit time since that could cause a deadlock. + * This is a very rare case. 
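/*
 * Illustrative sketch, not part of the patch: the NOFS scoping pattern used
 * in the delayed-inode hunks above. Allocations made between save/restore
 * implicitly drop __GFP_FS, so helpers that allocate internally cannot
 * recurse into filesystem reclaim while a transaction is held.
 * do_tree_op() is a hypothetical stand-in for btrfs_search_slot() and friends.
 */
#include <linux/sched/mm.h>
#include <linux/slab.h>

static int do_tree_op(void)
{
	/* GFP_KERNEL is treated as GFP_NOFS inside a memalloc_nofs scope */
	void *p = kmalloc(64, GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	kfree(p);
	return 0;
}

static int do_locked_tree_op(void)
{
	unsigned int nofs_flag;
	int ret;

	nofs_flag = memalloc_nofs_save();	/* no fs reclaim from here on */
	ret = do_tree_op();
	memalloc_nofs_restore(nofs_flag);

	return ret;
}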
+ */ + btrfs_flush_workqueue(fs_info->delayed_workers); + ret = btrfs_commit_super(fs_info); if (ret) btrfs_err(fs_info, "commit super ret %d", ret); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 47ca1ebda056d4af270f9790f73ed01596c827a5..271e70c45d5bd04fcaa2c304d3115916118ace99 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -10286,7 +10286,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; - goto out; + goto out_put_group; } /* @@ -10323,7 +10323,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, ret = btrfs_orphan_add(trans, BTRFS_I(inode)); if (ret) { btrfs_add_delayed_iput(inode); - goto out; + goto out_put_group; } clear_nlink(inode); /* One for the block groups ref */ @@ -10346,13 +10346,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); if (ret < 0) - goto out; + goto out_put_group; if (ret > 0) btrfs_release_path(path); if (ret == 0) { ret = btrfs_del_item(trans, tree_root, path); if (ret) - goto out; + goto out_put_group; btrfs_release_path(path); } @@ -10494,9 +10494,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, ret = remove_block_group_free_space(trans, block_group); if (ret) - goto out; + goto out_put_group; - btrfs_put_block_group(block_group); + /* Once for the block groups rbtree */ btrfs_put_block_group(block_group); ret = btrfs_search_slot(trans, root, &key, path, -1, 1); @@ -10524,6 +10524,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, /* once for the tree */ free_extent_map(em); } + +out_put_group: + /* Once for the lookup reference */ + btrfs_put_block_group(block_group); out: btrfs_free_path(path); return ret; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index c2c93fe9d7fd5805c0b4f142f7774e84589e6a66..dc1841855a69abf770445f81ba8f54926d474936 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2073,6 +2073,16 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) btrfs_init_log_ctx(&ctx, inode); + /* + * Set the range to full if the NO_HOLES feature is not enabled. + * This is to avoid missing file extent items representing holes after + * replaying the log. + */ + if (!btrfs_fs_incompat(fs_info, NO_HOLES)) { + start = 0; + end = LLONG_MAX; + } + /* * We write the dirty pages in the range and wait until they complete * out of the ->i_mutex. 
If so, we can flush the dirty pages by @@ -2127,6 +2137,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) */ ret = start_ordered_ops(inode, start, end); if (ret) { + up_write(&BTRFS_I(inode)->dio_sem); inode_unlock(inode); goto out; } diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 0cd043f03081e1ad3df197b6bdbd1273d53bf9ad..cbd40826f5dcf8f9ff1f7d20277ab6beb0564f12 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1032,6 +1032,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info) ret = qgroup_rescan_init(fs_info, 0, 1); if (!ret) { qgroup_rescan_zero_tracking(fs_info); + fs_info->qgroup_rescan_running = true; btrfs_queue_work(fs_info->qgroup_rescan_workers, &fs_info->qgroup_rescan_work); } @@ -2906,7 +2907,6 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, sizeof(fs_info->qgroup_rescan_progress)); fs_info->qgroup_rescan_progress.objectid = progress_objectid; init_completion(&fs_info->qgroup_rescan_completion); - fs_info->qgroup_rescan_running = true; spin_unlock(&fs_info->qgroup_lock); mutex_unlock(&fs_info->qgroup_rescan_lock); @@ -2972,8 +2972,11 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) qgroup_rescan_zero_tracking(fs_info); + mutex_lock(&fs_info->qgroup_rescan_lock); + fs_info->qgroup_rescan_running = true; btrfs_queue_work(fs_info->qgroup_rescan_workers, &fs_info->qgroup_rescan_work); + mutex_unlock(&fs_info->qgroup_rescan_lock); return 0; } @@ -3009,9 +3012,13 @@ int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info) { - if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) + if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { + mutex_lock(&fs_info->qgroup_rescan_lock); + fs_info->qgroup_rescan_running = true; btrfs_queue_work(fs_info->qgroup_rescan_workers, &fs_info->qgroup_rescan_work); + mutex_unlock(&fs_info->qgroup_rescan_lock); + } } /* diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index f98913061a40cfa52148cc94d0526cad873b22b5..eedcb7bf50e9ea2c8282978a066052ef9cad0eb8 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -525,8 +525,8 @@ static int should_ignore_root(struct btrfs_root *root) if (!reloc_root) return 0; - if (btrfs_root_last_snapshot(&reloc_root->root_item) == - root->fs_info->running_transaction->transid - 1) + if (btrfs_header_generation(reloc_root->commit_root) == + root->fs_info->running_transaction->transid) return 0; /* * if there is reloc tree and it was created in previous @@ -1141,7 +1141,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc, free_backref_node(cache, lower); } - free_backref_node(cache, node); + remove_backref_node(cache, node); return ERR_PTR(err); } ASSERT(!node || !node->detached); @@ -1253,7 +1253,7 @@ static int __must_check __add_reloc_root(struct btrfs_root *root) if (!node) return -ENOMEM; - node->bytenr = root->node->start; + node->bytenr = root->commit_root->start; node->data = root; spin_lock(&rc->reloc_root_tree.lock); @@ -1284,10 +1284,11 @@ static void __del_reloc_root(struct btrfs_root *root) if (rc && root->node) { spin_lock(&rc->reloc_root_tree.lock); rb_node = tree_search(&rc->reloc_root_tree.rb_root, - root->node->start); + root->commit_root->start); if (rb_node) { node = rb_entry(rb_node, struct mapping_node, rb_node); rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); + RB_CLEAR_NODE(&node->rb_node); } spin_unlock(&rc->reloc_root_tree.lock); if (!node) @@ -1305,7 +1306,7 @@ static void 
__del_reloc_root(struct btrfs_root *root) * helper to update the 'address of tree root -> reloc tree' * mapping */ -static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) +static int __update_reloc_root(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; struct rb_node *rb_node; @@ -1314,7 +1315,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) spin_lock(&rc->reloc_root_tree.lock); rb_node = tree_search(&rc->reloc_root_tree.rb_root, - root->node->start); + root->commit_root->start); if (rb_node) { node = rb_entry(rb_node, struct mapping_node, rb_node); rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); @@ -1326,7 +1327,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr) BUG_ON((struct btrfs_root *)node->data != root); spin_lock(&rc->reloc_root_tree.lock); - node->bytenr = new_bytenr; + node->bytenr = root->node->start; rb_node = tree_insert(&rc->reloc_root_tree.rb_root, node->bytenr, &node->rb_node); spin_unlock(&rc->reloc_root_tree.lock); @@ -1471,6 +1472,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, } if (reloc_root->commit_root != reloc_root->node) { + __update_reloc_root(reloc_root); btrfs_set_root_node(root_item, reloc_root->node); free_extent_buffer(reloc_root->commit_root); reloc_root->commit_root = btrfs_root_node(reloc_root); @@ -2434,7 +2436,21 @@ void merge_reloc_roots(struct reloc_control *rc) free_reloc_roots(&reloc_roots); } - BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); + /* + * We used to have + * + * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); + * + * here, but it's wrong. If we fail to start the transaction in + * prepare_to_merge() we will have only 0 ref reloc roots, none of which + * have actually been removed from the reloc_root_tree rb tree. This is + * fine because we're bailing here, and we hold a reference on the root + * for the list that holds it, so these roots will be cleaned up when we + * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root + * will be cleaned up on unmount. + * + * The remaining nodes will be cleaned up by free_reloc_control. + */ } static void free_block_list(struct rb_root *blocks) @@ -4585,11 +4601,6 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, BUG_ON(rc->stage == UPDATE_DATA_PTRS && root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { - if (buf == root->node) - __update_reloc_root(root, cow->start); - } - level = btrfs_header_level(buf); if (btrfs_header_generation(buf) <= btrfs_root_last_snapshot(&root->root_item)) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 4b1491e1b80320c0d196e8e1cdf13f629a70c220..8829d89eb4afffe5a8cd2c7bda4b7ff5746f78a9 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -572,10 +572,19 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, } got_it: - btrfs_record_root_in_trans(h, root); - if (!current->journal_info) current->journal_info = h; + + /* + * btrfs_record_root_in_trans() needs to alloc new extents, and may + * call btrfs_join_transaction() while we're also starting a + * transaction. + * + * Thus it need to be called after current->journal_info initialized, + * or we can deadlock. 
+ */ + btrfs_record_root_in_trans(h, root); + return h; join_fail: diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index d4c86c6cbe39537d2d59446d6292d006d97ae26a..928ac2c4899e752f2868d3e0e396780a406bb42e 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4182,6 +4182,9 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, const u64 ino = btrfs_ino(inode); struct btrfs_path *dst_path = NULL; bool dropped_extents = false; + u64 truncate_offset = i_size; + struct extent_buffer *leaf; + int slot; int ins_nr = 0; int start_slot; int ret; @@ -4196,9 +4199,43 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, if (ret < 0) goto out; + /* + * We must check if there is a prealloc extent that starts before the + * i_size and crosses the i_size boundary. This is to ensure later we + * truncate down to the end of that extent and not to the i_size, as + * otherwise we end up losing part of the prealloc extent after a log + * replay and with an implicit hole if there is another prealloc extent + * that starts at an offset beyond i_size. + */ + ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY); + if (ret < 0) + goto out; + + if (ret == 0) { + struct btrfs_file_extent_item *ei; + + leaf = path->nodes[0]; + slot = path->slots[0]; + ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); + + if (btrfs_file_extent_type(leaf, ei) == + BTRFS_FILE_EXTENT_PREALLOC) { + u64 extent_end; + + btrfs_item_key_to_cpu(leaf, &key, slot); + extent_end = key.offset + + btrfs_file_extent_num_bytes(leaf, ei); + + if (extent_end > i_size) + truncate_offset = extent_end; + } + } else { + ret = 0; + } + while (true) { - struct extent_buffer *leaf = path->nodes[0]; - int slot = path->slots[0]; + leaf = path->nodes[0]; + slot = path->slots[0]; if (slot >= btrfs_header_nritems(leaf)) { if (ins_nr > 0) { @@ -4236,7 +4273,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, ret = btrfs_truncate_inode_items(trans, root->log_root, &inode->vfs_inode, - i_size, + truncate_offset, BTRFS_EXTENT_DATA_KEY); } while (ret == -EAGAIN); if (ret) diff --git a/fs/buffer.c b/fs/buffer.c index d32364efd39d95a744be5143e2afb0d20221d9b7..9718f7d4ff932febaee46c7185640198ddbfde30 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1337,6 +1337,17 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size) } EXPORT_SYMBOL(__breadahead); +void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size, + gfp_t gfp) +{ + struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); + if (likely(bh)) { + ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); + brelse(bh); + } +} +EXPORT_SYMBOL(__breadahead_gfp); + /** * __bread_gfp() - reads a specified block and returns the bh * @bdev: the block_device to read from diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 4c0b220e20bab10acc9922122733dc7fa88f2d17..5241102b81a82ad89f61fe7370e16fe226cfa285 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1972,8 +1972,12 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags, } /* want more caps from mds? 
*/ - if (want & ~(cap->mds_wanted | cap->issued)) - goto ack; + if (want & ~cap->mds_wanted) { + if (want & ~(cap->mds_wanted | cap->issued)) + goto ack; + if (!__cap_is_valid(cap)) + goto ack; + } /* things we might delay */ if ((cap->issued & ~retain) == 0 && diff --git a/fs/ceph/export.c b/fs/ceph/export.c index 3c59ad180ef0bb5b9dfb09528e5c1dc363f9b94e..4cfe1154d4c726f621b5a6f689f7e7b05ca52a39 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -151,6 +151,11 @@ static struct dentry *__get_parent(struct super_block *sb, req->r_num_caps = 1; err = ceph_mdsc_do_request(mdsc, NULL, req); + if (err) { + ceph_mdsc_put_request(req); + return ERR_PTR(err); + } + inode = req->r_target_inode; if (inode) ihold(inode); diff --git a/fs/ceph/super.c b/fs/ceph/super.c index c4314f4492401453debfa4df6bf520dad0ffec10..18e967089aeb0d56d4b14385ce4d4b2855e1bd01 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -105,7 +105,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } - static int ceph_sync_fs(struct super_block *sb, int wait) { struct ceph_fs_client *fsc = ceph_sb_to_client(sb); @@ -206,6 +205,26 @@ static match_table_t fsopt_tokens = { {-1, NULL} }; +/* + * Remove adjacent slashes and then the trailing slash, unless it is + * the only remaining character. + * + * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/". + */ +static void canonicalize_path(char *path) +{ + int i, j = 0; + + for (i = 0; path[i] != '\0'; i++) { + if (path[i] != '/' || j < 1 || path[j - 1] != '/') + path[j++] = path[i]; + } + + if (j > 1 && path[j - 1] == '/') + j--; + path[j] = '\0'; +} + static int parse_fsopt_token(char *c, void *private) { struct ceph_mount_options *fsopt = private; @@ -415,12 +434,15 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt, ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name); if (ret) return ret; + ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace); if (ret) return ret; + ret = strcmp_null(fsopt1->server_path, fsopt2->server_path); if (ret) return ret; + ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq); if (ret) return ret; @@ -476,13 +498,17 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt, */ dev_name_end = strchr(dev_name, '/'); if (dev_name_end) { - if (strlen(dev_name_end) > 1) { - fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL); - if (!fsopt->server_path) { - err = -ENOMEM; - goto out; - } + /* + * The server_path will include the whole chars from userland + * including the leading '/'. + */ + fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL); + if (!fsopt->server_path) { + err = -ENOMEM; + goto out; } + + canonicalize_path(fsopt->server_path); } else { dev_name_end = dev_name + strlen(dev_name); } @@ -810,7 +836,6 @@ static void destroy_caches(void) ceph_fscache_unregister(); } - /* * ceph_umount_begin - initiate forced umount. Tear down down the * mount, skipping steps that may hang while waiting for server(s). @@ -897,9 +922,6 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc, return root; } - - - /* * mount: join the ceph cluster, and open root directory. */ @@ -913,7 +935,9 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) mutex_lock(&fsc->client->mount_mutex); if (!fsc->sb->s_root) { - const char *path; + const char *path = fsc->mount_options->server_path ? 
+ fsc->mount_options->server_path + 1 : ""; + err = __ceph_open_session(fsc->client, started); if (err < 0) goto out; @@ -925,13 +949,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) goto out; } - if (!fsc->mount_options->server_path) { - path = ""; - dout("mount opening path \\t\n"); - } else { - path = fsc->mount_options->server_path + 1; - dout("mount opening path %s\n", path); - } + dout("mount opening path '%s'\n", path); err = ceph_fs_debugfs_init(fsc); if (err < 0) diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 8d3eabf06d66aeb7f5fe273f726b27075f9a5441..65da12ff5449f3d590890a990df0b028aa76c9d8 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -86,7 +86,7 @@ struct ceph_mount_options { char *snapdir_name; /* default ".snap" */ char *mds_namespace; /* default NULL */ - char *server_path; /* default "/" */ + char *server_path; /* default NULL (means "/") */ char *fscache_uniq; /* default NULL */ }; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 975f800b9dd4d169dece68e33a1c3bb0a05859e8..9e569d60c636beb9989e056641d079e3714d4988 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -353,8 +353,10 @@ static int reconn_set_ipaddr(struct TCP_Server_Info *server) return rc; } + spin_lock(&cifs_tcp_ses_lock); rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr, strlen(ipaddr)); + spin_unlock(&cifs_tcp_ses_lock); kfree(ipaddr); return !rc ? -1 : 0; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index ad93b063f86656b602d9d5fc293d701b07c63dda..cfb0d91289ec76620add04e8b2d96ef49d466be9 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3339,7 +3339,7 @@ collect_uncached_read_data(struct cifs_aio_ctx *ctx) if (rc == -ENODATA) rc = 0; - ctx->rc = (rc == 0) ? ctx->total_len : rc; + ctx->rc = (rc == 0) ? 
(ssize_t)ctx->total_len : rc; mutex_unlock(&ctx->aio_mutex); diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 0c4df56c825abf30fd8bc9af035010eb32f6f2e0..70412944b267d9a0591c56ae7c1bf89bce02ff6a 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -392,7 +392,7 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst, int flags) { struct kvec iov; - struct smb2_transform_hdr tr_hdr; + struct smb2_transform_hdr *tr_hdr; struct smb_rqst cur_rqst[MAX_COMPOUND]; int rc; @@ -402,28 +402,34 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, if (num_rqst > MAX_COMPOUND - 1) return -ENOMEM; - memset(&cur_rqst[0], 0, sizeof(cur_rqst)); - memset(&iov, 0, sizeof(iov)); - memset(&tr_hdr, 0, sizeof(tr_hdr)); - - iov.iov_base = &tr_hdr; - iov.iov_len = sizeof(tr_hdr); - cur_rqst[0].rq_iov = &iov; - cur_rqst[0].rq_nvec = 1; - if (!server->ops->init_transform_rq) { cifs_dbg(VFS, "Encryption requested but transform callback " "is missing\n"); return -EIO; } + tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS); + if (!tr_hdr) + return -ENOMEM; + + memset(&cur_rqst[0], 0, sizeof(cur_rqst)); + memset(&iov, 0, sizeof(iov)); + memset(tr_hdr, 0, sizeof(*tr_hdr)); + + iov.iov_base = tr_hdr; + iov.iov_len = sizeof(*tr_hdr); + cur_rqst[0].rq_iov = &iov; + cur_rqst[0].rq_nvec = 1; + rc = server->ops->init_transform_rq(server, num_rqst + 1, &cur_rqst[0], rqst); if (rc) - return rc; + goto out; rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]); smb3_free_compound_rqst(num_rqst, &cur_rqst[1]); +out: + kfree(tr_hdr); return rc; } diff --git a/fs/coredump.c b/fs/coredump.c index d9b3ba086d92339ffe7c0735dbe1f46009009ff1..6250eaf9b4d50329f693b05747ab8b4232b0cc19 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -753,6 +753,14 @@ void do_coredump(const siginfo_t *siginfo) if (displaced) put_files_struct(displaced); if (!dump_interrupted()) { + /* + * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would + * have this set to NULL. 
+ */ + if (!cprm.file) { + pr_info("Core dump to |%s disabled\n", cn.corename); + goto close_fail; + } file_start_write(cprm.file); core_dumped = binfmt->core_dump(&cprm); file_end_write(cprm.file); diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 63bfe5e8accd2814f49159d541727a77f8fbb2a4..4023456e02084478073b57633f2f0f4c95efbd9f 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -583,4 +583,4 @@ int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) return valid; } -EXPORT_SYMBOL(fscrypt_d_revalidate); +EXPORT_SYMBOL_GPL(fscrypt_d_revalidate); diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index eaa2014ec84b5fd1f4df2877fc4911df97ab4391..aebc8ecb7caae840cae23fd0aae0e25788212d29 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -41,6 +41,24 @@ static void fscrypt_get_devices(struct super_block *sb, int num_devs, sb->s_cop->get_devices(sb, devs); } +static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci) +{ + struct super_block *sb = ci->ci_inode->i_sb; + unsigned int flags = fscrypt_policy_flags(&ci->ci_policy); + int ino_bits = 64, lblk_bits = 64; + + if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) + return offsetofend(union fscrypt_iv, nonce); + + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) + return sizeof(__le64); + + /* Default case: IVs are just the file logical block number */ + if (sb->s_cop->get_ino_and_lblk_bits) + sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); + return DIV_ROUND_UP(lblk_bits, 8); +} + /* Enable inline encryption for this file if supported. */ int fscrypt_select_encryption_impl(struct fscrypt_info *ci, bool is_hw_wrapped_key) @@ -48,6 +66,7 @@ int fscrypt_select_encryption_impl(struct fscrypt_info *ci, const struct inode *inode = ci->ci_inode; struct super_block *sb = inode->i_sb; enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; + unsigned int dun_bytes; struct request_queue **devs; int num_devs; int i; @@ -83,9 +102,12 @@ int fscrypt_select_encryption_impl(struct fscrypt_info *ci, fscrypt_get_devices(sb, num_devs, devs); + dun_bytes = fscrypt_get_dun_bytes(ci); + for (i = 0; i < num_devs; i++) { if (!keyslot_manager_crypto_mode_supported(devs[i]->ksm, crypto_mode, + dun_bytes, sb->s_blocksize, is_hw_wrapped_key)) goto out_free_devs; @@ -106,6 +128,7 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, const struct inode *inode = ci->ci_inode; struct super_block *sb = inode->i_sb; enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; + unsigned int dun_bytes; int num_devs; int queue_refs = 0; struct fscrypt_blk_crypto_key *blk_key; @@ -123,11 +146,14 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, blk_key->num_devs = num_devs; fscrypt_get_devices(sb, num_devs, blk_key->devs); + dun_bytes = fscrypt_get_dun_bytes(ci); + BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE); err = blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size, - is_hw_wrapped, crypto_mode, sb->s_blocksize); + is_hw_wrapped, crypto_mode, dun_bytes, + sb->s_blocksize); if (err) { fscrypt_err(inode, "error %d initializing blk-crypto key", err); goto fail; @@ -148,7 +174,8 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, } queue_refs++; - err = blk_crypto_start_using_mode(crypto_mode, sb->s_blocksize, + err = blk_crypto_start_using_mode(crypto_mode, dun_bytes, + sb->s_blocksize, is_hw_wrapped, blk_key->devs[i]); if (err) { diff --git a/fs/exec.c 
b/fs/exec.c index aa9d20cbd689b50ffe17a068b908eae1e00a422e..9672ad40e4a6cf15930355246dcb04d4c6e58cfe 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1378,7 +1378,7 @@ void setup_new_exec(struct linux_binprm * bprm) /* An exec changes our domain. We are no longer part of the thread group */ - current->self_exec_id++; + WRITE_ONCE(current->self_exec_id, current->self_exec_id + 1); flush_signal_handlers(current, 0); } EXPORT_SYMBOL(setup_new_exec); diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index dd8f10db82e992da8b8f4fb37699d7bf4a87cf4d..bd1d68ff3a9f81ffecb3c4138cd646532ba3733c 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -56,6 +56,7 @@ #include #include +#include #include #include #include @@ -84,8 +85,8 @@ printk("\n"); \ } while (0) #else -# define ea_idebug(f...) -# define ea_bdebug(f...) +# define ea_idebug(inode, f...) no_printk(f) +# define ea_bdebug(bh, f...) no_printk(f) #endif static int ext2_xattr_set2(struct inode *, struct buffer_head *, @@ -838,8 +839,7 @@ ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh) error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr, 1); if (error) { if (error == -EBUSY) { - ea_bdebug(bh, "already in cache (%d cache entries)", - atomic_read(&ext2_xattr_cache->c_entry_count)); + ea_bdebug(bh, "already in cache"); error = 0; } } else diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index cf6bd0dc4c4828811cd45c31c75cd2074f013673..243a99dd7e28e3d4617f5153295c789350678fad 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -498,6 +498,30 @@ int ext4_ext_check_inode(struct inode *inode) return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0); } +static void ext4_cache_extents(struct inode *inode, + struct ext4_extent_header *eh) +{ + struct ext4_extent *ex = EXT_FIRST_EXTENT(eh); + ext4_lblk_t prev = 0; + int i; + + for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) { + unsigned int status = EXTENT_STATUS_WRITTEN; + ext4_lblk_t lblk = le32_to_cpu(ex->ee_block); + int len = ext4_ext_get_actual_len(ex); + + if (prev && (prev != lblk)) + ext4_es_cache_extent(inode, prev, lblk - prev, ~0, + EXTENT_STATUS_HOLE); + + if (ext4_ext_is_unwritten(ex)) + status = EXTENT_STATUS_UNWRITTEN; + ext4_es_cache_extent(inode, lblk, len, + ext4_ext_pblock(ex), status); + prev = lblk + len; + } +} + static struct buffer_head * __read_extent_tree_block(const char *function, unsigned int line, struct inode *inode, ext4_fsblk_t pblk, int depth, @@ -532,26 +556,7 @@ __read_extent_tree_block(const char *function, unsigned int line, */ if (!(flags & EXT4_EX_NOCACHE) && depth == 0) { struct ext4_extent_header *eh = ext_block_hdr(bh); - struct ext4_extent *ex = EXT_FIRST_EXTENT(eh); - ext4_lblk_t prev = 0; - int i; - - for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) { - unsigned int status = EXTENT_STATUS_WRITTEN; - ext4_lblk_t lblk = le32_to_cpu(ex->ee_block); - int len = ext4_ext_get_actual_len(ex); - - if (prev && (prev != lblk)) - ext4_es_cache_extent(inode, prev, - lblk - prev, ~0, - EXTENT_STATUS_HOLE); - - if (ext4_ext_is_unwritten(ex)) - status = EXTENT_STATUS_UNWRITTEN; - ext4_es_cache_extent(inode, lblk, len, - ext4_ext_pblock(ex), status); - prev = lblk + len; - } + ext4_cache_extents(inode, eh); } return bh; errout: @@ -899,6 +904,8 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block, path[0].p_bh = NULL; i = depth; + if (!(flags & EXT4_EX_NOCACHE) && depth == 0) + ext4_cache_extents(inode, eh); /* walk through the tree */ while (i) { ext_debug("depth %d: num %d, max %d\n", @@ -3438,8 
+3445,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, (unsigned long long)map->m_lblk, map_len); sbi = EXT4_SB(inode->i_sb); - eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> - inode->i_sb->s_blocksize_bits; + eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) + >> inode->i_sb->s_blocksize_bits; if (eof_block < map->m_lblk + map_len) eof_block = map->m_lblk + map_len; @@ -3694,8 +3701,8 @@ static int ext4_split_convert_extents(handle_t *handle, __func__, inode->i_ino, (unsigned long long)map->m_lblk, map->m_len); - eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> - inode->i_sb->s_blocksize_bits; + eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) + >> inode->i_sb->s_blocksize_bits; if (eof_block < map->m_lblk + map->m_len) eof_block = map->m_lblk + map->m_len; /* diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index cbb65e62e4012dbeba2d510eb7c569272e982308..e536694308c17acb63aa1393a5f03ce8d94194cc 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -665,7 +665,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent, * block has been written back to disk. (Yes, these values are * somewhat arbitrary...) */ -#define RECENTCY_MIN 5 +#define RECENTCY_MIN 60 #define RECENTCY_DIRTY 300 static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index deb722e8e8828cf51139d345f2b0481bdfe321d6..9be687b66d1738eed08b684fbd02c10b3f486151 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2149,7 +2149,7 @@ static int ext4_writepage(struct page *page, bool keep_towrite = false; if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) { - ext4_invalidatepage(page, 0, PAGE_SIZE); + inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); unlock_page(page); return -EIO; } @@ -4761,7 +4761,7 @@ static int __ext4_get_inode_loc(struct inode *inode, if (end > table) end = table; while (b <= end) - sb_breadahead(sb, b++); + sb_breadahead_unmovable(sb, b++); } /* @@ -5220,7 +5220,7 @@ static int ext4_inode_blocks_set(handle_t *handle, struct ext4_inode_info *ei) { struct inode *inode = &(ei->vfs_inode); - u64 i_blocks = inode->i_blocks; + u64 i_blocks = READ_ONCE(inode->i_blocks); struct super_block *sb = inode->i_sb; if (i_blocks <= ~0U) { diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 71121fcf9e8ccdaa232744f12535940158cc2d1c..8dd54a8a0361029eb552a57b210a591d0c112071 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -1936,7 +1936,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, int free; free = e4b->bd_info->bb_free; - BUG_ON(free <= 0); + if (WARN_ON(free <= 0)) + return; i = e4b->bd_info->bb_first_free; @@ -1959,7 +1960,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, } mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); - BUG_ON(ex.fe_len <= 0); + if (WARN_ON(ex.fe_len <= 0)) + break; if (free < ex.fe_len) { ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, "%d free clusters as per " diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 21e3f28b02bb47edc162ed2d666054f402d73020..ff5aa914acf9a1b25c6063bd560b6cabf08f391d 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -389,7 +389,8 @@ static void save_error_info(struct super_block *sb, const char *func, unsigned int line) { __save_error_info(sb, func, line); - ext4_commit_super(sb, 1); + if (!bdev_read_only(sb->s_bdev)) + ext4_commit_super(sb, 1); } /* @@ -3600,7 +3601,8 @@ int ext4_calculate_overhead(struct 
super_block *sb) */ if (sbi->s_journal && !sbi->journal_bdev) overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen); - else if (ext4_has_feature_journal(sb) && !sbi->s_journal) { + else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) { + /* j_inum for internal journal is non-zero */ j_inode = ext4_get_journal_inode(sb, j_inum); if (j_inode) { j_blocks = j_inode->i_size >> sb->s_blocksize_bits; @@ -4145,7 +4147,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || sbi->s_inodes_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n", - sbi->s_blocks_per_group); + sbi->s_inodes_per_group); goto failed_mount; } sbi->s_itb_per_group = sbi->s_inodes_per_group / @@ -4276,9 +4278,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) EXT4_BLOCKS_PER_GROUP(sb) - 1); do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb)); if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) { - ext4_msg(sb, KERN_WARNING, "groups count too large: %u " + ext4_msg(sb, KERN_WARNING, "groups count too large: %llu " "(block count %llu, first data block %u, " - "blocks per group %lu)", sbi->s_groups_count, + "blocks per group %lu)", blocks_count, ext4_blocks_count(es), le32_to_cpu(es->s_first_data_block), EXT4_BLOCKS_PER_GROUP(sb)); @@ -4321,7 +4323,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) /* Pre-read the descriptors into the buffer cache */ for (i = 0; i < db_count; i++) { block = descriptor_loc(sb, logical_sb_block, i); - sb_breadahead(sb, block); + sb_breadahead_unmovable(sb, block); } for (i = 0; i < db_count; i++) { diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 852890b72d6acd62bf63bef69c38520f674f0b98..5ba649e17c72be1dc9a64ee1972b1825e3bb0cdb 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -86,6 +86,8 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, return ERR_PTR(err); } + f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE); + lock_page(page); if (unlikely(page->mapping != mapping)) { f2fs_put_page(page, 1); @@ -220,6 +222,7 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, .is_por = (type == META_POR), }; struct blk_plug plug; + int err; if (unlikely(type == META_POR)) fio.op_flags &= ~REQ_META; @@ -263,8 +266,11 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, } fio.page = page; - f2fs_submit_page_bio(&fio); - f2fs_put_page(page, 0); + err = f2fs_submit_page_bio(&fio); + f2fs_put_page(page, err ? 
1 : 0); + + if (!err) + f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE); } out: blk_finish_plug(&plug); diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index f839caf7ebf17ec1fc0c9e7ed29de10dc6e69fd0..3e06301de1de6db790cb159b648a7a694ee12f66 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -985,7 +985,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, loff_t psize; int i, err; - if (!f2fs_trylock_op(sbi)) + if (!IS_NOQUOTA(inode) && !f2fs_trylock_op(sbi)) return -EAGAIN; set_new_dnode(&dn, cc->inode, NULL, NULL, 0); @@ -1093,7 +1093,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN); f2fs_put_dnode(&dn); - f2fs_unlock_op(sbi); + if (!IS_NOQUOTA(inode)) + f2fs_unlock_op(sbi); spin_lock(&fi->i_size_lock); if (fi->last_disk_size < psize) @@ -1119,7 +1120,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, out_put_dnode: f2fs_put_dnode(&dn); out_unlock_op: - f2fs_unlock_op(sbi); + if (!IS_NOQUOTA(inode)) + f2fs_unlock_op(sbi); return -EAGAIN; } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index c844f3141c487c0f65dbfdcd675549fa364da487..a6d69508a090f7efa09926438b6dd0287528c640 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -579,6 +579,28 @@ void f2fs_submit_bio(struct f2fs_sb_info *sbi, __submit_bio(sbi, bio, type); } +static void __attach_data_io_flag(struct f2fs_io_info *fio) +{ + struct f2fs_sb_info *sbi = fio->sbi; + unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1; + unsigned int fua_flag = sbi->data_io_flag & temp_mask; + unsigned int meta_flag = (sbi->data_io_flag >> NR_TEMP_TYPE) & + temp_mask; + /* + * data io flag bits per temp: + * REQ_META | REQ_FUA | + * 5 | 4 | 3 | 2 | 1 | 0 | + * Cold | Warm | Hot | Cold | Warm | Hot | + */ + if (fio->type != DATA) + return; + + if ((1 << fio->temp) & meta_flag) + fio->op_flags |= REQ_META; + if ((1 << fio->temp) & fua_flag) + fio->op_flags |= REQ_FUA; +} + static void __submit_merged_bio(struct f2fs_bio_info *io) { struct f2fs_io_info *fio = &io->fio; @@ -586,6 +608,7 @@ static void __submit_merged_bio(struct f2fs_bio_info *io) if (!io->bio) return; + __attach_data_io_flag(fio); bio_set_op_attrs(io->bio, fio->op, fio->op_flags); if (is_read_io(fio->op)) @@ -1096,6 +1119,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page, } ClearPageError(page); inc_page_count(sbi, F2FS_RD_DATA); + f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); __f2fs_submit_read_bio(sbi, bio, DATA); return 0; } @@ -2102,6 +2126,7 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page, goto submit_and_realloc; inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA); + f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE); ClearPageError(page); *last_block_in_bio = block_nr; goto out; @@ -2238,6 +2263,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, goto submit_and_realloc; inc_page_count(sbi, F2FS_RD_DATA); + f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); ClearPageError(page); *last_block_in_bio = blkaddr; } @@ -2724,8 +2750,8 @@ int f2fs_write_single_data_page(struct page *page, int *submitted, f2fs_available_free_memory(sbi, BASE_CHECK)))) goto redirty_out; - /* Dentry blocks are controlled by checkpoint */ - if (S_ISDIR(inode->i_mode)) { + /* Dentry/quota blocks are controlled by checkpoint */ + if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) { fio.need_lock = LOCK_DONE; err = f2fs_do_write_data_page(&fio); goto done; @@ -3628,6 +3654,9 @@ static ssize_t 
f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) } else if (err < 0) { f2fs_write_failed(mapping, offset + count); } + } else { + if (err > 0) + f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err); } out: diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 04e639acd8cd93fbff34e6a01ed4ce27df583a90..af20ebe566af948adcd6da029e9d6a4440c1a637 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1090,8 +1090,9 @@ enum cp_reason_type { }; enum iostat_type { - APP_DIRECT_IO, /* app direct IOs */ - APP_BUFFERED_IO, /* app buffered IOs */ + /* WRITE IO */ + APP_DIRECT_IO, /* app direct write IOs */ + APP_BUFFERED_IO, /* app buffered write IOs */ APP_WRITE_IO, /* app write IOs */ APP_MAPPED_IO, /* app mapped IOs */ FS_DATA_IO, /* data IOs from kworker/fsync/reclaimer */ @@ -1102,6 +1103,17 @@ enum iostat_type { FS_CP_DATA_IO, /* data IOs from checkpoint */ FS_CP_NODE_IO, /* node IOs from checkpoint */ FS_CP_META_IO, /* meta IOs from checkpoint */ + + /* READ IO */ + APP_DIRECT_READ_IO, /* app direct read IOs */ + APP_BUFFERED_READ_IO, /* app buffered read IOs */ + APP_READ_IO, /* app read IOs */ + APP_MAPPED_READ_IO, /* app mapped read IOs */ + FS_DATA_READ_IO, /* data read IOs */ + FS_NODE_READ_IO, /* node read IOs */ + FS_META_READ_IO, /* meta read IOs */ + + /* other */ FS_DISCARD, /* discard */ NR_IO_TYPE, }; @@ -1502,8 +1514,14 @@ struct f2fs_sb_info { /* For app/fs IO statistics */ spinlock_t iostat_lock; - unsigned long long write_iostat[NR_IO_TYPE]; + unsigned long long rw_iostat[NR_IO_TYPE]; + unsigned long long prev_rw_iostat[NR_IO_TYPE]; bool iostat_enable; + unsigned long iostat_next_period; + unsigned int iostat_period_ms; + + /* to attach REQ_META|REQ_FUA flags */ + unsigned int data_io_flag; /* For sysfs suppport */ struct kobject s_kobj; @@ -2995,29 +3013,45 @@ static inline int get_inline_xattr_addrs(struct inode *inode) sizeof((f2fs_inode)->field)) \ <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \ +#define DEFAULT_IOSTAT_PERIOD_MS 3000 +#define MIN_IOSTAT_PERIOD_MS 100 +/* maximum period of iostat tracing is 1 day */ +#define MAX_IOSTAT_PERIOD_MS 8640000 + static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi) { int i; spin_lock(&sbi->iostat_lock); - for (i = 0; i < NR_IO_TYPE; i++) - sbi->write_iostat[i] = 0; + for (i = 0; i < NR_IO_TYPE; i++) { + sbi->rw_iostat[i] = 0; + sbi->prev_rw_iostat[i] = 0; + } spin_unlock(&sbi->iostat_lock); } +extern void f2fs_record_iostat(struct f2fs_sb_info *sbi); + static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, enum iostat_type type, unsigned long long io_bytes) { if (!sbi->iostat_enable) return; spin_lock(&sbi->iostat_lock); - sbi->write_iostat[type] += io_bytes; + sbi->rw_iostat[type] += io_bytes; if (type == APP_WRITE_IO || type == APP_DIRECT_IO) - sbi->write_iostat[APP_BUFFERED_IO] = - sbi->write_iostat[APP_WRITE_IO] - - sbi->write_iostat[APP_DIRECT_IO]; + sbi->rw_iostat[APP_BUFFERED_IO] = + sbi->rw_iostat[APP_WRITE_IO] - + sbi->rw_iostat[APP_DIRECT_IO]; + + if (type == APP_READ_IO || type == APP_DIRECT_READ_IO) + sbi->rw_iostat[APP_BUFFERED_READ_IO] = + sbi->rw_iostat[APP_READ_IO] - + sbi->rw_iostat[APP_DIRECT_READ_IO]; spin_unlock(&sbi->iostat_lock); + + f2fs_record_iostat(sbi); } #define __is_large_section(sbi) ((sbi)->segs_per_sec > 1) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index e85691a6ecb98433556ffedf0a30e7af8332c7ee..643220edbbe737912fab4044aaa508a655c1a4e5 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -41,6 +41,10 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf) ret = 
filemap_fault(vmf); up_read(&F2FS_I(inode)->i_mmap_sem); + if (!ret) + f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO, + F2FS_BLKSIZE); + trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret); return ret; @@ -3525,11 +3529,17 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); + int ret; if (!f2fs_is_compress_backend_ready(inode)) return -EOPNOTSUPP; - return generic_file_read_iter(iocb, iter); + ret = generic_file_read_iter(iocb, iter); + + if (ret > 0) + f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret); + + return ret; } static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 26248c8936db0f121ead3384215103399c8bb2dd..28a8c79c8bdc3076c06bdc04ebb2754af991fe22 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -737,6 +737,9 @@ static int ra_data_block(struct inode *inode, pgoff_t index) goto put_encrypted_page; f2fs_put_page(fio.encrypted_page, 0); f2fs_put_page(page, 1); + + f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); + return 0; put_encrypted_page: f2fs_put_page(fio.encrypted_page, 1); @@ -840,6 +843,9 @@ static int move_data_block(struct inode *inode, block_t bidx, f2fs_put_page(mpage, 1); goto up_out; } + + f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); + lock_page(mpage); if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || !PageUptodate(mpage))) { diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 29d4ee7e6ea0b9b680fb67e87091febd3eae813f..68422396e620092039ad35fae1800283355c14c0 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1298,7 +1298,13 @@ static int read_node_page(struct page *page, int op_flags) } fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr; - return f2fs_submit_page_bio(&fio); + + err = f2fs_submit_page_bio(&fio); + + if (!err) + f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE); + + return err; } /* diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 2158f27fc701f75c7f4d007541bb5312d3fdedf0..021a1bb9eb43f94c182b88e11aba6f433d755fc7 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1029,9 +1029,9 @@ static void f2fs_submit_discard_endio(struct bio *bio) struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private; unsigned long flags; - dc->error = blk_status_to_errno(bio->bi_status); - spin_lock_irqsave(&dc->lock, flags); + if (!dc->error) + dc->error = blk_status_to_errno(bio->bi_status); dc->bio_ref--; if (!dc->bio_ref && dc->state == D_SUBMIT) { dc->state = D_DONE; @@ -1101,7 +1101,6 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi, } else if (discard_type == DPOLICY_FSTRIM) { dpolicy->io_aware = false; } else if (discard_type == DPOLICY_UMOUNT) { - dpolicy->max_requests = UINT_MAX; dpolicy->io_aware = false; /* we need to issue all to keep CP_TRIMMED_FLAG */ dpolicy->granularity = 1; @@ -1215,8 +1214,10 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi, len = total_len; } - if (!err && len) + if (!err && len) { + dcc->undiscard_blks -= len; __update_discard_tree_range(sbi, bdev, lstart, start, len); + } return err; } @@ -1463,6 +1464,8 @@ static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi, return issued; } +static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi, + struct discard_policy *dpolicy); static int __issue_discard_cmd(struct f2fs_sb_info *sbi, struct discard_policy *dpolicy) @@ -1471,12 +1474,14 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi, 
struct list_head *pend_list; struct discard_cmd *dc, *tmp; struct blk_plug plug; - int i, issued = 0; + int i, issued; bool io_interrupted = false; if (dpolicy->timeout) f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT); +retry: + issued = 0; for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { if (dpolicy->timeout && f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT)) @@ -1523,6 +1528,11 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi, break; } + if (dpolicy->type == DPOLICY_UMOUNT && issued) { + __wait_all_discard_cmd(sbi, dpolicy); + goto retry; + } + if (!issued && io_interrupted) issued = -1; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 08b3f8264f64538b620ec8638f163b1edddb6f36..873ac3c4dce5ae6a9ff042c913be3d9bb99e59a1 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -3486,6 +3486,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) /* init iostat info */ spin_lock_init(&sbi->iostat_lock); sbi->iostat_enable = false; + sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS; for (i = 0; i < NR_PAGE_TYPE; i++) { int n = (i == META) ? 1: NR_TEMP_TYPE; diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index e9d49bb6aed87a14ce1c16d2dadc4563101bae8c..02326bea2b7019b3ac23af797d92157ead4f73c1 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -15,6 +15,7 @@ #include "f2fs.h" #include "segment.h" #include "gc.h" +#include static struct proc_dir_entry *f2fs_proc_root; @@ -374,7 +375,6 @@ static ssize_t __sbi_store(struct f2fs_attr *a, return count; } - if (!strcmp(a->attr.name, "iostat_enable")) { sbi->iostat_enable = !!t; if (!sbi->iostat_enable) @@ -382,6 +382,15 @@ static ssize_t __sbi_store(struct f2fs_attr *a, return count; } + if (!strcmp(a->attr.name, "iostat_period_ms")) { + if (t < MIN_IOSTAT_PERIOD_MS || t > MAX_IOSTAT_PERIOD_MS) + return -EINVAL; + spin_lock(&sbi->iostat_lock); + sbi->iostat_period_ms = (unsigned int)t; + spin_unlock(&sbi->iostat_lock); + return count; + } + *ui = (unsigned int)t; return count; @@ -538,6 +547,7 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle_interval, interval_time[GC_TIME]); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, umount_discard_timeout, interval_time[UMOUNT_DISCARD_TIMEOUT]); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable); +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_period_ms, iostat_period_ms); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold); F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list); @@ -545,6 +555,7 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list); F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate); F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type); #endif +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, data_io_flag, data_io_flag); F2FS_GENERAL_RO_ATTR(dirty_segments); F2FS_GENERAL_RO_ATTR(free_segments); F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes); @@ -617,6 +628,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(gc_idle_interval), ATTR_LIST(umount_discard_timeout), ATTR_LIST(iostat_enable), + ATTR_LIST(iostat_period_ms), ATTR_LIST(readdir_ra), ATTR_LIST(gc_pin_file_thresh), ATTR_LIST(extension_list), @@ -624,6 +636,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(inject_rate), ATTR_LIST(inject_type), #endif + ATTR_LIST(data_io_flag), ATTR_LIST(dirty_segments), ATTR_LIST(free_segments), ATTR_LIST(unusable), @@ -750,6 +763,33 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq, return 0; 
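
For reference, the decoding in __attach_data_io_flag means the low NR_TEMP_TYPE bits of data_io_flag request REQ_FUA and the next NR_TEMP_TYPE bits request REQ_META, one bit per temperature (hot = 0, warm = 1, cold = 2). A minimal userspace sketch of composing such a mask and feeding it to the new sysfs attribute; the device name in the path and the helper names are illustrative assumptions, not part of the patch:

#include <stdio.h>

/* Bit positions assumed from the in-kernel comment: hot = 0, warm = 1, cold = 2 */
enum temp { TEMP_HOT = 0, TEMP_WARM = 1, TEMP_COLD = 2, NR_TEMP = 3 };

/* Low NR_TEMP bits select REQ_FUA, the next NR_TEMP bits select REQ_META */
static unsigned int make_data_io_flag(unsigned int fua_temps, unsigned int meta_temps)
{
        unsigned int temp_mask = (1u << NR_TEMP) - 1;

        return (fua_temps & temp_mask) | ((meta_temps & temp_mask) << NR_TEMP);
}

int main(void)
{
        /* Example: FUA for hot data writes, META for cold data writes */
        unsigned int flag = make_data_io_flag(1u << TEMP_HOT, 1u << TEMP_COLD);
        /* Path is illustrative; substitute the real block device name */
        FILE *f = fopen("/sys/fs/f2fs/sda1/data_io_flag", "w");

        if (!f) {
                perror("open data_io_flag");
                return 1;
        }
        fprintf(f, "%u\n", flag);
        fclose(f);
        return 0;
}
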
} +void f2fs_record_iostat(struct f2fs_sb_info *sbi) +{ + unsigned long long iostat_diff[NR_IO_TYPE]; + int i; + + if (time_is_after_jiffies(sbi->iostat_next_period)) + return; + + /* Need double check under the lock */ + spin_lock(&sbi->iostat_lock); + if (time_is_after_jiffies(sbi->iostat_next_period)) { + spin_unlock(&sbi->iostat_lock); + return; + } + sbi->iostat_next_period = jiffies + + msecs_to_jiffies(sbi->iostat_period_ms); + + for (i = 0; i < NR_IO_TYPE; i++) { + iostat_diff[i] = sbi->rw_iostat[i] - + sbi->prev_rw_iostat[i]; + sbi->prev_rw_iostat[i] = sbi->rw_iostat[i]; + } + spin_unlock(&sbi->iostat_lock); + + trace_f2fs_iostat(sbi, iostat_diff); +} + static int __maybe_unused iostat_info_seq_show(struct seq_file *seq, void *offset) { @@ -762,33 +802,51 @@ static int __maybe_unused iostat_info_seq_show(struct seq_file *seq, seq_printf(seq, "time: %-16llu\n", now); - /* print app IOs */ + /* print app write IOs */ seq_printf(seq, "app buffered: %-16llu\n", - sbi->write_iostat[APP_BUFFERED_IO]); + sbi->rw_iostat[APP_BUFFERED_IO]); seq_printf(seq, "app direct: %-16llu\n", - sbi->write_iostat[APP_DIRECT_IO]); + sbi->rw_iostat[APP_DIRECT_IO]); seq_printf(seq, "app mapped: %-16llu\n", - sbi->write_iostat[APP_MAPPED_IO]); + sbi->rw_iostat[APP_MAPPED_IO]); - /* print fs IOs */ + /* print fs write IOs */ seq_printf(seq, "fs data: %-16llu\n", - sbi->write_iostat[FS_DATA_IO]); + sbi->rw_iostat[FS_DATA_IO]); seq_printf(seq, "fs node: %-16llu\n", - sbi->write_iostat[FS_NODE_IO]); + sbi->rw_iostat[FS_NODE_IO]); seq_printf(seq, "fs meta: %-16llu\n", - sbi->write_iostat[FS_META_IO]); + sbi->rw_iostat[FS_META_IO]); seq_printf(seq, "fs gc data: %-16llu\n", - sbi->write_iostat[FS_GC_DATA_IO]); + sbi->rw_iostat[FS_GC_DATA_IO]); seq_printf(seq, "fs gc node: %-16llu\n", - sbi->write_iostat[FS_GC_NODE_IO]); + sbi->rw_iostat[FS_GC_NODE_IO]); seq_printf(seq, "fs cp data: %-16llu\n", - sbi->write_iostat[FS_CP_DATA_IO]); + sbi->rw_iostat[FS_CP_DATA_IO]); seq_printf(seq, "fs cp node: %-16llu\n", - sbi->write_iostat[FS_CP_NODE_IO]); + sbi->rw_iostat[FS_CP_NODE_IO]); seq_printf(seq, "fs cp meta: %-16llu\n", - sbi->write_iostat[FS_CP_META_IO]); + sbi->rw_iostat[FS_CP_META_IO]); + + /* print app read IOs */ + seq_printf(seq, "app buffered: %-16llu\n", + sbi->rw_iostat[APP_BUFFERED_READ_IO]); + seq_printf(seq, "app direct: %-16llu\n", + sbi->rw_iostat[APP_DIRECT_READ_IO]); + seq_printf(seq, "app mapped: %-16llu\n", + sbi->rw_iostat[APP_MAPPED_READ_IO]); + + /* print fs read IOs */ + seq_printf(seq, "fs data: %-16llu\n", + sbi->rw_iostat[FS_DATA_READ_IO]); + seq_printf(seq, "fs node: %-16llu\n", + sbi->rw_iostat[FS_NODE_READ_IO]); + seq_printf(seq, "fs meta: %-16llu\n", + sbi->rw_iostat[FS_META_READ_IO]); + + /* print other IOs */ seq_printf(seq, "fs discard: %-16llu\n", - sbi->write_iostat[FS_DISCARD]); + sbi->rw_iostat[FS_DISCARD]); return 0; } diff --git a/fs/filesystems.c b/fs/filesystems.c index b03f57b1105b34bc5d7438c95f62e2430eb756a1..181200daeeba78c3f83d4dd9feeaf67b4761e091 100644 --- a/fs/filesystems.c +++ b/fs/filesystems.c @@ -267,7 +267,9 @@ struct file_system_type *get_fs_type(const char *name) fs = __get_fs_type(name, len); if (!fs && (request_module("fs-%.*s", len, name) == 0)) { fs = __get_fs_type(name, len); - WARN_ONCE(!fs, "request_module fs-%.*s succeeded, but still no fs?\n", len, name); + if (!fs) + pr_warn_once("request_module fs-%.*s succeeded, but still no fs?\n", + len, name); } if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 
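
f2fs_record_iostat() above rate-limits the new iostat trace: a lock-free deadline check filters the hot path, and the same check is repeated under iostat_lock so only one caller per period computes the per-counter deltas. A simplified userspace model of that double-checked, period-based snapshot; the counter layout and names are illustrative assumptions:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NR_COUNTERS 4
#define PERIOD_MS   3000                /* same default as DEFAULT_IOSTAT_PERIOD_MS */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t counters[NR_COUNTERS];  /* running totals */
static uint64_t prev[NR_COUNTERS];      /* totals at the last snapshot */
static uint64_t next_period_ms;         /* deadline of the next snapshot */

static uint64_t now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static void record_snapshot(void)
{
        uint64_t diff[NR_COUNTERS];
        int i;

        /* Cheap unlocked check first: most calls return here */
        if (now_ms() < next_period_ms)
                return;

        pthread_mutex_lock(&lock);
        /* Re-check under the lock so only one thread emits per period */
        if (now_ms() < next_period_ms) {
                pthread_mutex_unlock(&lock);
                return;
        }
        next_period_ms = now_ms() + PERIOD_MS;

        for (i = 0; i < NR_COUNTERS; i++) {
                diff[i] = counters[i] - prev[i];
                prev[i] = counters[i];
        }
        pthread_mutex_unlock(&lock);

        for (i = 0; i < NR_COUNTERS; i++)
                printf("counter %d: +%llu\n", i, (unsigned long long)diff[i]);
}

static void account(int type, uint64_t bytes)
{
        pthread_mutex_lock(&lock);
        counters[type] += bytes;
        pthread_mutex_unlock(&lock);
        record_snapshot();
}

int main(void)
{
        account(0, 4096);
        account(1, 4096);
        return 0;
}
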
ccdd8c821abd72049d744d0e45d9a7d99aabf2ed..f8a5eef3d014bf6408118627d2a75fdead062cd1 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -636,6 +636,9 @@ __acquires(&gl->gl_lockref.lock) goto out_unlock; if (nonblock) goto out_sched; + smp_mb(); + if (atomic_read(&gl->gl_revokes) != 0) + goto out_sched; set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); gl->gl_target = gl->gl_demote_state; diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c index e6d554476db41cf61f1d850682f2aff0583e9122..eeebe80c6be4aa39878bee63521d2558d9a189da 100644 --- a/fs/hfsplus/attributes.c +++ b/fs/hfsplus/attributes.c @@ -292,6 +292,10 @@ static int __hfsplus_delete_attr(struct inode *inode, u32 cnid, return -ENOENT; } + /* Avoid btree corruption */ + hfs_bnode_read(fd->bnode, fd->search_key, + fd->keyoffset, fd->keylength); + err = hfs_brec_remove(fd); if (err) return err; diff --git a/fs/incfs/data_mgmt.c b/fs/incfs/data_mgmt.c index 91541b46f771733a80124e70203add0dddc0d6c4..bf34f57e27a23a9ee1f8faba29a572aa99512c88 100644 --- a/fs/incfs/data_mgmt.c +++ b/fs/incfs/data_mgmt.c @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include #include @@ -15,6 +17,13 @@ #include "format.h" #include "integrity.h" +static void log_wake_up_all(struct work_struct *work) +{ + struct delayed_work *dw = container_of(work, struct delayed_work, work); + struct read_log *rl = container_of(dw, struct read_log, ml_wakeup_work); + wake_up_all(&rl->ml_notif_wq); +} + struct mount_info *incfs_alloc_mount_info(struct super_block *sb, struct mount_options *options, struct path *backing_dir_path) @@ -34,7 +43,8 @@ struct mount_info *incfs_alloc_mount_info(struct super_block *sb, mutex_init(&mi->mi_pending_reads_mutex); init_waitqueue_head(&mi->mi_pending_reads_notif_wq); init_waitqueue_head(&mi->mi_log.ml_notif_wq); - spin_lock_init(&mi->mi_log.rl_writer_lock); + INIT_DELAYED_WORK(&mi->mi_log.ml_wakeup_work, log_wake_up_all); + spin_lock_init(&mi->mi_log.rl_lock); INIT_LIST_HEAD(&mi->mi_reads_list_head); error = incfs_realloc_mount_info(mi, options); @@ -51,20 +61,41 @@ struct mount_info *incfs_alloc_mount_info(struct super_block *sb, int incfs_realloc_mount_info(struct mount_info *mi, struct mount_options *options) { - kfree(mi->mi_log.rl_ring_buf); - mi->mi_log.rl_ring_buf = NULL; - mi->mi_log.rl_size = 0; + void *new_buffer = NULL; + void *old_buffer; + size_t new_buffer_size = 0; - mi->mi_options = *options; - if (options->read_log_pages != 0) { - size_t buf_size = PAGE_SIZE * options->read_log_pages; + if (options->read_log_pages != mi->mi_options.read_log_pages) { + struct read_log_state log_state; + /* + * Even though having two buffers allocated at once isn't + * usually good, allocating a multipage buffer under a spinlock + * is even worse, so let's optimize for the shorter lock + * duration. It's not end of the world if we fail to increase + * the buffer size anyway. 
+ */ + if (options->read_log_pages > 0) { + new_buffer_size = PAGE_SIZE * options->read_log_pages; + new_buffer = kzalloc(new_buffer_size, GFP_NOFS); + if (!new_buffer) + return -ENOMEM; + } + + spin_lock(&mi->mi_log.rl_lock); + old_buffer = mi->mi_log.rl_ring_buf; + mi->mi_log.rl_ring_buf = new_buffer; + mi->mi_log.rl_size = new_buffer_size; + log_state = (struct read_log_state){ + .generation_id = mi->mi_log.rl_head.generation_id + 1, + }; + mi->mi_log.rl_head = log_state; + mi->mi_log.rl_tail = log_state; + spin_unlock(&mi->mi_log.rl_lock); - mi->mi_log.rl_size = buf_size / sizeof(*mi->mi_log.rl_ring_buf); - mi->mi_log.rl_ring_buf = kzalloc(buf_size, GFP_NOFS); - if (!mi->mi_log.rl_ring_buf) - return -ENOMEM; + kfree(old_buffer); } + mi->mi_options = *options; return 0; } @@ -73,6 +104,8 @@ void incfs_free_mount_info(struct mount_info *mi) if (!mi) return; + flush_delayed_work(&mi->mi_log.ml_wakeup_work); + dput(mi->mi_index_dir); path_put(&mi->mi_backing_dir_path); mutex_destroy(&mi->mi_dir_struct_mutex); @@ -227,37 +260,136 @@ static ssize_t decompress(struct mem_range src, struct mem_range dst) return result; } +static void log_read_one_record(struct read_log *rl, struct read_log_state *rs) +{ + union log_record *record = + (union log_record *)((u8 *)rl->rl_ring_buf + rs->next_offset); + size_t record_size; + + switch (record->full_record.type) { + case FULL: + rs->base_record = record->full_record; + record_size = sizeof(record->full_record); + break; + + case SAME_FILE: + rs->base_record.block_index = + record->same_file_record.block_index; + rs->base_record.absolute_ts_us += + record->same_file_record.relative_ts_us; + record_size = sizeof(record->same_file_record); + break; + + case SAME_FILE_NEXT_BLOCK: + ++rs->base_record.block_index; + rs->base_record.absolute_ts_us += + record->same_file_next_block.relative_ts_us; + record_size = sizeof(record->same_file_next_block); + break; + + case SAME_FILE_NEXT_BLOCK_SHORT: + ++rs->base_record.block_index; + rs->base_record.absolute_ts_us += + record->same_file_next_block_short.relative_ts_us; + record_size = sizeof(record->same_file_next_block_short); + break; + } + + rs->next_offset += record_size; + if (rs->next_offset > rl->rl_size - sizeof(*record)) { + rs->next_offset = 0; + ++rs->current_pass_no; + } + ++rs->current_record_no; +} + static void log_block_read(struct mount_info *mi, incfs_uuid_t *id, - int block_index, bool timed_out) + int block_index) { struct read_log *log = &mi->mi_log; - struct read_log_state state; - s64 now_us = ktime_to_us(ktime_get()); - struct read_log_record record = { - .file_id = *id, - .block_index = block_index, - .timed_out = timed_out, - .timestamp_us = now_us - }; - - if (log->rl_size == 0) + struct read_log_state *head, *tail; + s64 now_us; + s64 relative_us; + union log_record record; + size_t record_size; + + /* + * This may read the old value, but it's OK to delay the logging start + * right after the configuration update. 
+ */ + if (READ_ONCE(log->rl_size) == 0) return; - spin_lock(&log->rl_writer_lock); - state = READ_ONCE(log->rl_state); - log->rl_ring_buf[state.next_index] = record; - if (++state.next_index == log->rl_size) { - state.next_index = 0; - ++state.current_pass_no; + now_us = ktime_to_us(ktime_get()); + + spin_lock(&log->rl_lock); + if (log->rl_size == 0) { + spin_unlock(&log->rl_lock); + return; } - WRITE_ONCE(log->rl_state, state); - spin_unlock(&log->rl_writer_lock); - wake_up_all(&log->ml_notif_wq); + head = &log->rl_head; + tail = &log->rl_tail; + relative_us = now_us - head->base_record.absolute_ts_us; + + if (memcmp(id, &head->base_record.file_id, sizeof(incfs_uuid_t)) || + relative_us >= 1ll << 32) { + record.full_record = (struct full_record){ + .type = FULL, + .block_index = block_index, + .file_id = *id, + .absolute_ts_us = now_us, + }; + head->base_record.file_id = *id; + record_size = sizeof(struct full_record); + } else if (block_index != head->base_record.block_index + 1 || + relative_us >= 1 << 30) { + record.same_file_record = (struct same_file_record){ + .type = SAME_FILE, + .block_index = block_index, + .relative_ts_us = relative_us, + }; + record_size = sizeof(struct same_file_record); + } else if (relative_us >= 1 << 14) { + record.same_file_next_block = (struct same_file_next_block){ + .type = SAME_FILE_NEXT_BLOCK, + .relative_ts_us = relative_us, + }; + record_size = sizeof(struct same_file_next_block); + } else { + record.same_file_next_block_short = + (struct same_file_next_block_short){ + .type = SAME_FILE_NEXT_BLOCK_SHORT, + .relative_ts_us = relative_us, + }; + record_size = sizeof(struct same_file_next_block_short); + } + + head->base_record.block_index = block_index; + head->base_record.absolute_ts_us = now_us; + + /* Advance tail beyond area we are going to overwrite */ + while (tail->current_pass_no < head->current_pass_no && + tail->next_offset < head->next_offset + record_size) + log_read_one_record(log, tail); + + memcpy(((u8 *)log->rl_ring_buf) + head->next_offset, &record, + record_size); + head->next_offset += record_size; + if (head->next_offset > log->rl_size - sizeof(record)) { + head->next_offset = 0; + ++head->current_pass_no; + } + ++head->current_record_no; + + spin_unlock(&log->rl_lock); + if (schedule_delayed_work(&log->ml_wakeup_work, msecs_to_jiffies(16))) + pr_debug("incfs: scheduled a log pollers wakeup"); } static int validate_hash_tree(struct file *bf, struct data_file *df, - int block_index, struct mem_range data, u8 *buf) + int block_index, struct mem_range data, + u8 *tmp_buf) { u8 digest[INCFS_MAX_HASH_SIZE] = {}; struct mtree *tree = NULL; @@ -270,6 +402,7 @@ static int validate_hash_tree(struct file *bf, struct data_file *df, int hash_per_block; int lvl = 0; int res; + struct page *saved_page = NULL; tree = df->df_hash_tree; sig = df->df_signature; @@ -291,17 +424,39 @@ static int validate_hash_tree(struct file *bf, struct data_file *df, INCFS_DATA_FILE_BLOCK_SIZE); size_t hash_off_in_block = hash_block_index * digest_size % INCFS_DATA_FILE_BLOCK_SIZE; - struct mem_range buf_range = range(buf, - INCFS_DATA_FILE_BLOCK_SIZE); - ssize_t read_res = incfs_kread(bf, buf, - INCFS_DATA_FILE_BLOCK_SIZE, hash_block_off); + struct mem_range buf_range; + struct page *page = NULL; + bool aligned = (hash_block_off & + (INCFS_DATA_FILE_BLOCK_SIZE - 1)) == 0; + u8 *actual_buf; + + if (aligned) { + page = read_mapping_page( + bf->f_inode->i_mapping, + hash_block_off / INCFS_DATA_FILE_BLOCK_SIZE, + NULL); + + if (IS_ERR(page)) + return PTR_ERR(page); + 
+ actual_buf = page_address(page); + } else { + size_t read_res = + incfs_kread(bf, tmp_buf, + INCFS_DATA_FILE_BLOCK_SIZE, + hash_block_off); + + if (read_res < 0) + return read_res; + if (read_res != INCFS_DATA_FILE_BLOCK_SIZE) + return -EIO; - if (read_res < 0) - return read_res; - if (read_res != INCFS_DATA_FILE_BLOCK_SIZE) - return -EIO; + actual_buf = tmp_buf; + } - saved_digest_rng = range(buf + hash_off_in_block, digest_size); + buf_range = range(actual_buf, INCFS_DATA_FILE_BLOCK_SIZE); + saved_digest_rng = + range(actual_buf + hash_off_in_block, digest_size); if (!incfs_equal_ranges(calc_digest_rng, saved_digest_rng)) { int i; bool zero = true; @@ -316,9 +471,37 @@ static int validate_hash_tree(struct file *bf, struct data_file *df, if (zero) pr_debug("incfs: Note saved_digest all zero - did you forget to load the hashes?\n"); + + if (saved_page) + put_page(saved_page); + if (page) + put_page(page); return -EBADMSG; } + if (saved_page) { + /* + * This is something of a kludge. The PageChecked flag + * is reserved for the file system, but we are setting + * this on the pages belonging to the underlying file + * system. incfs is only going to be used on f2fs and + * ext4 which only use this flag when fs-verity is being + * used, so this is safe for now, however a better + * mechanism needs to be found. + */ + SetPageChecked(saved_page); + put_page(saved_page); + saved_page = NULL; + } + + if (page && PageChecked(page)) { + put_page(page); + return 0; + } + + saved_page = page; + page = NULL; + res = incfs_calc_digest(tree->alg, buf_range, calc_digest_rng); if (res) return res; @@ -328,8 +511,15 @@ static int validate_hash_tree(struct file *bf, struct data_file *df, root_hash_rng = range(tree->root_hash, digest_size); if (!incfs_equal_ranges(calc_digest_rng, root_hash_rng)) { pr_debug("incfs: Root hash mismatch blk:%d\n", block_index); + if (saved_page) + put_page(saved_page); return -EBADMSG; } + + if (saved_page) { + SetPageChecked(saved_page); + put_page(saved_page); + } return 0; } @@ -347,13 +537,28 @@ static bool is_data_block_present(struct data_file_block *block) (block->db_stored_size != 0); } +static void convert_data_file_block(struct incfs_blockmap_entry *bme, + struct data_file_block *res_block) +{ + u16 flags = le16_to_cpu(bme->me_flags); + + res_block->db_backing_file_data_offset = + le16_to_cpu(bme->me_data_offset_hi); + res_block->db_backing_file_data_offset <<= 32; + res_block->db_backing_file_data_offset |= + le32_to_cpu(bme->me_data_offset_lo); + res_block->db_stored_size = le16_to_cpu(bme->me_data_size); + res_block->db_comp_alg = (flags & INCFS_BLOCK_COMPRESSED_LZ4) ? + COMPRESSION_LZ4 : + COMPRESSION_NONE; +} + static int get_data_file_block(struct data_file *df, int index, struct data_file_block *res_block) { struct incfs_blockmap_entry bme = {}; struct backing_file_context *bfc = NULL; loff_t blockmap_off = 0; - u16 flags = 0; int error = 0; if (!df || !res_block) @@ -369,16 +574,7 @@ static int get_data_file_block(struct data_file *df, int index, if (error) return error; - flags = le16_to_cpu(bme.me_flags); - res_block->db_backing_file_data_offset = - le16_to_cpu(bme.me_data_offset_hi); - res_block->db_backing_file_data_offset <<= 32; - res_block->db_backing_file_data_offset |= - le32_to_cpu(bme.me_data_offset_lo); - res_block->db_stored_size = le16_to_cpu(bme.me_data_size); - res_block->db_comp_alg = (flags & INCFS_BLOCK_COMPRESSED_LZ4) ? 
- COMPRESSION_LZ4 : - COMPRESSION_NONE; + convert_data_file_block(&bme, res_block); return 0; } @@ -396,7 +592,8 @@ static int copy_one_range(struct incfs_filled_range *range, void __user *buffer, if (error) return error; - if (copy_to_user(((char *)buffer) + *size_out, range, sizeof(*range))) + if (copy_to_user(((char __user *)buffer) + *size_out, range, + sizeof(*range))) return -EFAULT; *size_out += sizeof(*range); @@ -431,17 +628,21 @@ static int update_file_header_flags(struct data_file *df, u32 bits_to_reset, return result; } +#define READ_BLOCKMAP_ENTRIES 512 int incfs_get_filled_blocks(struct data_file *df, struct incfs_get_filled_blocks_args *arg) { int error = 0; bool in_range = false; struct incfs_filled_range range; - void *buffer = u64_to_user_ptr(arg->range_buffer); + void __user *buffer = u64_to_user_ptr(arg->range_buffer); u32 size = arg->range_buffer_size; u32 end_index = arg->end_index ? arg->end_index : df->df_total_block_count; u32 *size_out = &arg->range_buffer_size_out; + int i = READ_BLOCKMAP_ENTRIES - 1; + int entries_read = 0; + struct incfs_blockmap_entry *bme; *size_out = 0; if (end_index > df->df_total_block_count) @@ -473,13 +674,34 @@ int incfs_get_filled_blocks(struct data_file *df, return 0; } + bme = kzalloc(sizeof(*bme) * READ_BLOCKMAP_ENTRIES, + GFP_NOFS | __GFP_COMP); + if (!bme) + return -ENOMEM; + for (arg->index_out = arg->start_index; arg->index_out < end_index; ++arg->index_out) { struct data_file_block dfb; - error = get_data_file_block(df, arg->index_out, &dfb); - if (error) + if (++i == READ_BLOCKMAP_ENTRIES) { + entries_read = incfs_read_blockmap_entries( + df->df_backing_file_context, bme, + arg->index_out, READ_BLOCKMAP_ENTRIES, + df->df_blockmap_off); + if (entries_read < 0) { + error = entries_read; + break; + } + + i = 0; + } + + if (i >= entries_read) { + error = -EIO; break; + } + + convert_data_file_block(bme + i, &dfb); if (is_data_block_present(&dfb) == in_range) continue; @@ -519,6 +741,7 @@ int incfs_get_filled_blocks(struct data_file *df, pr_debug("Marked file full with result %d", result); } + kfree(bme); return error; } @@ -652,8 +875,7 @@ static int wait_for_data_block(struct data_file *df, int block_index, mi = df->df_mount_info; if (timeout_ms == 0) { - log_block_read(mi, &df->df_id, block_index, - true /*timed out*/); + log_block_read(mi, &df->df_id, block_index); return -ETIME; } @@ -672,8 +894,7 @@ static int wait_for_data_block(struct data_file *df, int block_index, if (wait_res == 0) { /* Wait has timed out */ - log_block_read(mi, &df->df_id, block_index, - true /*timed out*/); + log_block_read(mi, &df->df_id, block_index); return -ETIME; } if (wait_res < 0) { @@ -769,7 +990,7 @@ ssize_t incfs_read_data_file_block(struct mem_range dst, struct data_file *df, } if (result >= 0) - log_block_read(mi, &df->df_id, index, false /*timed out*/); + log_block_read(mi, &df->df_id, index); out: return result; @@ -1139,36 +1360,29 @@ struct read_log_state incfs_get_log_state(struct mount_info *mi) struct read_log *log = &mi->mi_log; struct read_log_state result; - spin_lock(&log->rl_writer_lock); - result = READ_ONCE(log->rl_state); - spin_unlock(&log->rl_writer_lock); + spin_lock(&log->rl_lock); + result = log->rl_head; + spin_unlock(&log->rl_lock); return result; } -static u64 calc_record_count(const struct read_log_state *state, int rl_size) -{ - return state->current_pass_no * (u64)rl_size + state->next_index; -} - int incfs_get_uncollected_logs_count(struct mount_info *mi, - struct read_log_state state) + const struct 
read_log_state *state) { struct read_log *log = &mi->mi_log; - - u64 count = calc_record_count(&log->rl_state, log->rl_size) - - calc_record_count(&state, log->rl_size); - return min_t(int, count, log->rl_size); -} - -static void fill_pending_read_from_log_record( - struct incfs_pending_read_info *dest, const struct read_log_record *src, - struct read_log_state *state, u64 log_size) -{ - dest->file_id = src->file_id; - dest->block_index = src->block_index; - dest->serial_number = - state->current_pass_no * log_size + state->next_index; - dest->timestamp_us = src->timestamp_us; + u32 generation; + u64 head_no, tail_no; + + spin_lock(&log->rl_lock); + tail_no = log->rl_tail.current_record_no; + head_no = log->rl_head.current_record_no; + generation = log->rl_head.generation_id; + spin_unlock(&log->rl_lock); + + if (generation != state->generation_id) + return head_no - tail_no; + else + return head_no - max_t(u64, tail_no, state->current_record_no); } int incfs_collect_logged_reads(struct mount_info *mi, @@ -1176,58 +1390,47 @@ int incfs_collect_logged_reads(struct mount_info *mi, struct incfs_pending_read_info *reads, int reads_size) { - struct read_log *log = &mi->mi_log; - struct read_log_state live_state = incfs_get_log_state(mi); - u64 read_count = calc_record_count(reader_state, log->rl_size); - u64 written_count = calc_record_count(&live_state, log->rl_size); int dst_idx; + struct read_log *log = &mi->mi_log; + struct read_log_state *head, *tail; - if (reader_state->next_index >= log->rl_size || - read_count > written_count) - return -ERANGE; + spin_lock(&log->rl_lock); + head = &log->rl_head; + tail = &log->rl_tail; - if (read_count == written_count) - return 0; + if (reader_state->generation_id != head->generation_id) { + pr_debug("read ptr is wrong generation: %u/%u", + reader_state->generation_id, head->generation_id); - if (read_count > written_count) { - /* This reader is somehow ahead of the writer. */ - pr_debug("incfs: Log reader is ahead of writer\n"); - *reader_state = live_state; + *reader_state = (struct read_log_state){ + .generation_id = head->generation_id, + }; } - if (written_count - read_count > log->rl_size) { - /* - * Reading pointer is too far behind, - * start from the record following the write pointer. 
- */ - pr_debug("incfs: read pointer is behind, moving: %u/%u -> %u/%u / %u\n", - (u32)reader_state->next_index, - (u32)reader_state->current_pass_no, - (u32)live_state.next_index, - (u32)live_state.current_pass_no - 1, (u32)log->rl_size); + if (reader_state->current_record_no < tail->current_record_no) { + pr_debug("read ptr is behind, moving: %u/%u -> %u/%u\n", + (u32)reader_state->next_offset, + (u32)reader_state->current_pass_no, + (u32)tail->next_offset, (u32)tail->current_pass_no); - *reader_state = (struct read_log_state){ - .next_index = live_state.next_index, - .current_pass_no = live_state.current_pass_no - 1, - }; + *reader_state = *tail; } for (dst_idx = 0; dst_idx < reads_size; dst_idx++) { - if (reader_state->next_index == live_state.next_index && - reader_state->current_pass_no == live_state.current_pass_no) + if (reader_state->current_record_no == head->current_record_no) break; - fill_pending_read_from_log_record( - &reads[dst_idx], - &log->rl_ring_buf[reader_state->next_index], - reader_state, log->rl_size); + log_read_one_record(log, reader_state); - reader_state->next_index++; - if (reader_state->next_index == log->rl_size) { - reader_state->next_index = 0; - reader_state->current_pass_no++; - } + reads[dst_idx] = (struct incfs_pending_read_info){ + .file_id = reader_state->base_record.file_id, + .block_index = reader_state->base_record.block_index, + .serial_number = reader_state->current_record_no, + .timestamp_us = reader_state->base_record.absolute_ts_us + }; } + + spin_unlock(&log->rl_lock); return dst_idx; } diff --git a/fs/incfs/data_mgmt.h b/fs/incfs/data_mgmt.h index e8f2154c80d90d8b970c72302f5b3c17bf1fa0e1..d9d9966b838af68d005e2b7ae357f9423d457829 100644 --- a/fs/incfs/data_mgmt.h +++ b/fs/incfs/data_mgmt.h @@ -20,38 +20,78 @@ #define SEGMENTS_PER_FILE 3 -struct read_log_record { - u32 block_index : 31; - - u32 timed_out : 1; - - u64 timestamp_us; +enum LOG_RECORD_TYPE { + FULL, + SAME_FILE, + SAME_FILE_NEXT_BLOCK, + SAME_FILE_NEXT_BLOCK_SHORT, +}; +struct full_record { + enum LOG_RECORD_TYPE type : 2; /* FULL */ + u32 block_index : 30; incfs_uuid_t file_id; -} __packed; + u64 absolute_ts_us; +} __packed; /* 28 bytes */ + +struct same_file_record { + enum LOG_RECORD_TYPE type : 2; /* SAME_FILE */ + u32 block_index : 30; + u32 relative_ts_us; /* max 2^32 us ~= 1 hour (1:11:30) */ +} __packed; /* 12 bytes */ + +struct same_file_next_block { + enum LOG_RECORD_TYPE type : 2; /* SAME_FILE_NEXT_BLOCK */ + u32 relative_ts_us : 30; /* max 2^30 us ~= 15 min (17:50) */ +} __packed; /* 4 bytes */ + +struct same_file_next_block_short { + enum LOG_RECORD_TYPE type : 2; /* SAME_FILE_NEXT_BLOCK_SHORT */ + u16 relative_ts_us : 14; /* max 2^14 us ~= 16 ms */ +} __packed; /* 2 bytes */ + +union log_record { + struct full_record full_record; + struct same_file_record same_file_record; + struct same_file_next_block same_file_next_block; + struct same_file_next_block_short same_file_next_block_short; +}; struct read_log_state { - /* Next slot in rl_ring_buf to write to. */ - u32 next_index; + /* Log buffer generation id, incremented on configuration changes */ + u32 generation_id; - /* Current number of writer pass over rl_ring_buf */ + /* Offset in rl_ring_buf to write into. 
*/ + u32 next_offset; + + /* Current number of writer passes over rl_ring_buf */ u32 current_pass_no; + + /* Current full_record to diff against */ + struct full_record base_record; + + /* Current record number counting from configuration change */ + u64 current_record_no; }; /* A ring buffer to save records about data blocks which were recently read. */ struct read_log { - struct read_log_record *rl_ring_buf; + void *rl_ring_buf; - struct read_log_state rl_state; + int rl_size; - spinlock_t rl_writer_lock; + struct read_log_state rl_head; - int rl_size; + struct read_log_state rl_tail; - /* - * A queue of waiters who want to be notified about reads. - */ + /* A lock to protect the above fields */ + spinlock_t rl_lock; + + /* A queue of waiters who want to be notified about reads */ wait_queue_head_t ml_notif_wq; + + /* A work item to wake up those waiters without slowing down readers */ + struct delayed_work ml_wakeup_work; }; struct mount_options { @@ -268,7 +308,7 @@ int incfs_collect_logged_reads(struct mount_info *mi, int reads_size); struct read_log_state incfs_get_log_state(struct mount_info *mi); int incfs_get_uncollected_logs_count(struct mount_info *mi, - struct read_log_state state); + const struct read_log_state *state); static inline struct inode_info *get_incfs_node(struct inode *inode) { diff --git a/fs/incfs/format.c b/fs/incfs/format.c index 1a7c4646a29152b6758833f8599640440d9f57b8..c56e559b68938ef4332acf6f0303a443941e8623 100644 --- a/fs/incfs/format.c +++ b/fs/incfs/format.c @@ -94,7 +94,6 @@ static int append_zeros(struct backing_file_context *bfc, size_t len) { loff_t file_size = 0; loff_t new_last_byte_offset = 0; - int res = 0; if (!bfc) return -EFAULT; @@ -111,28 +110,18 @@ static int append_zeros(struct backing_file_context *bfc, size_t len) */ file_size = incfs_get_end_offset(bfc->bc_file); new_last_byte_offset = file_size + len - 1; - res = vfs_fallocate(bfc->bc_file, 0, new_last_byte_offset, 1); - if (res) - return res; - - res = vfs_fsync_range(bfc->bc_file, file_size, file_size + len, 1); - return res; + return vfs_fallocate(bfc->bc_file, 0, new_last_byte_offset, 1); } static int write_to_bf(struct backing_file_context *bfc, const void *buf, - size_t count, loff_t pos, bool sync) + size_t count, loff_t pos) { - ssize_t res = 0; + ssize_t res = incfs_kwrite(bfc->bc_file, buf, count, pos); - res = incfs_kwrite(bfc->bc_file, buf, count, pos); if (res < 0) return res; if (res != count) return -EIO; - - if (sync) - return vfs_fsync_range(bfc->bc_file, pos, pos + count, 1); - return 0; } @@ -186,7 +175,7 @@ static int append_md_to_backing_file(struct backing_file_context *bfc, /* Write the metadata record to the end of the backing file */ record_offset = file_pos; new_md_offset = cpu_to_le64(record_offset); - result = write_to_bf(bfc, record, record_size, file_pos, true); + result = write_to_bf(bfc, record, record_size, file_pos); if (result) return result; @@ -207,7 +196,7 @@ static int append_md_to_backing_file(struct backing_file_context *bfc, fh_first_md_offset); } result = write_to_bf(bfc, &new_md_offset, sizeof(new_md_offset), - file_pos, true); + file_pos); if (result) return result; @@ -222,8 +211,7 @@ int incfs_write_file_header_flags(struct backing_file_context *bfc, u32 flags) return write_to_bf(bfc, &flags, sizeof(flags), offsetof(struct incfs_file_header, - fh_file_header_flags), - false); + fh_file_header_flags)); } /* @@ -292,7 +280,7 @@ int incfs_write_file_attr_to_backing_file(struct backing_file_context *bfc, file_attr.fa_offset = 
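
The union log_record above trades record size for how much state has to be restated: 28 bytes for a full record down to 2 bytes when only a short relative timestamp is needed. A sketch of the writer's selection rule, with the thresholds taken from log_block_read(); the struct and helper names here are illustrative, not the in-kernel ones:

#include <stdint.h>
#include <string.h>

enum record_type { REC_FULL, REC_SAME_FILE, REC_NEXT_BLOCK, REC_NEXT_BLOCK_SHORT };

struct last_state {
        uint8_t  file_id[16];
        uint32_t block_index;
        int64_t  ts_us;         /* absolute timestamp of the base record */
};

/*
 * Pick the smallest record that can still encode this read:
 *   - new file or >= 2^32 us since the base record -> full record (28 bytes)
 *   - non-consecutive block or >= 2^30 us          -> same-file record (12 bytes)
 *   - >= 2^14 us                                   -> next-block record (4 bytes)
 *   - otherwise                                    -> short next-block record (2 bytes)
 */
static enum record_type pick_record(const struct last_state *last,
                                    const uint8_t file_id[16],
                                    uint32_t block_index, int64_t now_us)
{
        int64_t delta_us = now_us - last->ts_us;

        if (memcmp(file_id, last->file_id, 16) != 0 || delta_us >= (1ll << 32))
                return REC_FULL;
        if (block_index != last->block_index + 1 || delta_us >= (1 << 30))
                return REC_SAME_FILE;
        if (delta_us >= (1 << 14))
                return REC_NEXT_BLOCK;
        return REC_NEXT_BLOCK_SHORT;
}

int main(void)
{
        struct last_state last = { .block_index = 41, .ts_us = 1000000 };
        uint8_t id[16] = { 0 };

        /* A consecutive block read 5 ms later fits in the 2-byte record */
        return pick_record(&last, id, 42, 1005000) == REC_NEXT_BLOCK_SHORT ? 0 : 1;
}
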
cpu_to_le64(value_offset); file_attr.fa_crc = cpu_to_le32(crc); - result = write_to_bf(bfc, value.data, value.len, value_offset, true); + result = write_to_bf(bfc, value.data, value.len, value_offset); if (result) return result; @@ -332,7 +320,7 @@ int incfs_write_signature_to_backing_file(struct backing_file_context *bfc, sg.sg_sig_size = cpu_to_le32(sig.len); sg.sg_sig_offset = cpu_to_le64(pos); - result = write_to_bf(bfc, sig.data, sig.len, pos, false); + result = write_to_bf(bfc, sig.data, sig.len, pos); if (result) goto err; } @@ -365,10 +353,9 @@ int incfs_write_signature_to_backing_file(struct backing_file_context *bfc, /* Write a hash tree metadata record pointing to the hash tree above. */ result = append_md_to_backing_file(bfc, &sg.sg_header); err: - if (result) { + if (result) /* Error, rollback file changes */ truncate_backing_file(bfc, rollback_pos); - } return result; } @@ -402,7 +389,7 @@ int incfs_write_fh_to_backing_file(struct backing_file_context *bfc, if (file_pos != 0) return -EEXIST; - return write_to_bf(bfc, &fh, sizeof(fh), file_pos, true); + return write_to_bf(bfc, &fh, sizeof(fh), file_pos); } /* Write a given data block and update file's blockmap to point it. */ @@ -431,7 +418,7 @@ int incfs_write_data_block_to_backing_file(struct backing_file_context *bfc, } /* Write the block data at the end of the backing file. */ - result = write_to_bf(bfc, block.data, block.len, data_offset, false); + result = write_to_bf(bfc, block.data, block.len, data_offset); if (result) return result; @@ -441,16 +428,16 @@ int incfs_write_data_block_to_backing_file(struct backing_file_context *bfc, bm_entry.me_data_size = cpu_to_le16((u16)block.len); bm_entry.me_flags = cpu_to_le16(flags); - result = write_to_bf(bfc, &bm_entry, sizeof(bm_entry), - bm_entry_off, false); - return result; + return write_to_bf(bfc, &bm_entry, sizeof(bm_entry), + bm_entry_off); } int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc, struct mem_range block, int block_index, loff_t hash_area_off, - loff_t bm_base_off, int file_size) + loff_t bm_base_off, + loff_t file_size) { struct incfs_blockmap_entry bm_entry = {}; int result; @@ -473,7 +460,7 @@ int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc, return -EINVAL; } - result = write_to_bf(bfc, block.data, block.len, data_offset, false); + result = write_to_bf(bfc, block.data, block.len, data_offset); if (result) return result; @@ -482,8 +469,7 @@ int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc, bm_entry.me_data_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE); bm_entry.me_flags = cpu_to_le16(INCFS_BLOCK_HASH); - return write_to_bf(bfc, &bm_entry, sizeof(bm_entry), bm_entry_off, - false); + return write_to_bf(bfc, &bm_entry, sizeof(bm_entry), bm_entry_off); } /* Initialize a new image in a given backing file. 
*/ @@ -513,8 +499,19 @@ int incfs_read_blockmap_entry(struct backing_file_context *bfc, int block_index, loff_t bm_base_off, struct incfs_blockmap_entry *bm_entry) { - return incfs_read_blockmap_entries(bfc, bm_entry, block_index, 1, - bm_base_off); + int error = incfs_read_blockmap_entries(bfc, bm_entry, block_index, 1, + bm_base_off); + + if (error < 0) + return error; + + if (error == 0) + return -EIO; + + if (error != 1) + return -EFAULT; + + return 0; } int incfs_read_blockmap_entries(struct backing_file_context *bfc, @@ -538,9 +535,7 @@ int incfs_read_blockmap_entries(struct backing_file_context *bfc, bm_entry_off); if (result < 0) return result; - if (result < bytes_to_read) - return -EIO; - return 0; + return result / sizeof(*entries); } int incfs_read_file_header(struct backing_file_context *bfc, diff --git a/fs/incfs/format.h b/fs/incfs/format.h index deb5ca5bb0da9c5e007386d6c596708202b5bbb0..1a83349bb2eb705a25f09fe2e5747d6fcb2ad827 100644 --- a/fs/incfs/format.h +++ b/fs/incfs/format.h @@ -303,7 +303,8 @@ int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc, struct mem_range block, int block_index, loff_t hash_area_off, - loff_t bm_base_off, int file_size); + loff_t bm_base_off, + loff_t file_size); int incfs_write_file_attr_to_backing_file(struct backing_file_context *bfc, struct mem_range value, struct incfs_file_attr *attr); diff --git a/fs/incfs/integrity.c b/fs/incfs/integrity.c index 96e016a91542cbe1828506a99ea44c418d381b8b..d049988ef037b5347bf8f61bc55d5cce5b6613a4 100644 --- a/fs/incfs/integrity.c +++ b/fs/incfs/integrity.c @@ -62,7 +62,7 @@ static bool read_u32(u8 **p, u8 *top, u32 *result) if (*p + sizeof(u32) > top) return false; - *result = le32_to_cpu(*(u32 *)*p); + *result = le32_to_cpu(*(__le32 *)*p); *p += sizeof(u32); return true; } diff --git a/fs/incfs/vfs.c b/fs/incfs/vfs.c index 00b549a4ee41f32470c87a0e22921f2b2cfb9078..12cb3c750b24914f40a77dceeb9e1b401d2ed1ed 100644 --- a/fs/incfs/vfs.c +++ b/fs/incfs/vfs.c @@ -68,6 +68,7 @@ static struct inode *alloc_inode(struct super_block *sb); static void free_inode(struct inode *inode); static void evict_inode(struct inode *inode); +static int incfs_setattr(struct dentry *dentry, struct iattr *ia); static ssize_t incfs_getxattr(struct dentry *d, const char *name, void *value, size_t size); static ssize_t incfs_setxattr(struct dentry *d, const char *name, @@ -98,7 +99,8 @@ static const struct inode_operations incfs_dir_inode_ops = { .rename = dir_rename_wrap, .unlink = dir_unlink, .link = dir_link, - .rmdir = dir_rmdir + .rmdir = dir_rmdir, + .setattr = incfs_setattr, }; static const struct file_operations incfs_dir_fops = { @@ -158,7 +160,7 @@ static const struct file_operations incfs_log_file_ops = { }; static const struct inode_operations incfs_file_inode_ops = { - .setattr = simple_setattr, + .setattr = incfs_setattr, .getattr = simple_getattr, .listxattr = incfs_listxattr }; @@ -204,6 +206,8 @@ struct inode_search { unsigned long ino; struct dentry *backing_dentry; + + size_t size; }; enum parse_parameter { @@ -362,13 +366,14 @@ static int inode_set(struct inode *inode, void *opaque) fsstack_copy_attr_all(inode, backing_inode); if (S_ISREG(inode->i_mode)) { - u64 size = read_size_attr(backing_dentry); + u64 size = search->size; inode->i_size = size; inode->i_blocks = get_blocks_count_for_size(size); inode->i_mapping->a_ops = &incfs_address_space_ops; inode->i_op = &incfs_file_inode_ops; inode->i_fop = &incfs_file_ops; + inode->i_mode &= ~0222; } else if (S_ISDIR(inode->i_mode)) { 
inode->i_size = 0; inode->i_blocks = 1; @@ -437,7 +442,8 @@ static struct inode *fetch_regular_inode(struct super_block *sb, struct inode *backing_inode = d_inode(backing_dentry); struct inode_search search = { .ino = backing_inode->i_ino, - .backing_dentry = backing_dentry + .backing_dentry = backing_dentry, + .size = read_size_attr(backing_dentry), }; struct inode *inode = iget5_locked(sb, search.ino, inode_test, inode_set, &search); @@ -581,22 +587,27 @@ static ssize_t log_read(struct file *f, char __user *buf, size_t len, { struct log_file_state *log_state = f->private_data; struct mount_info *mi = get_mount_info(file_superblock(f)); - struct incfs_pending_read_info *reads_buf = - (struct incfs_pending_read_info *)__get_free_page(GFP_NOFS); - size_t reads_to_collect = len / sizeof(*reads_buf); - size_t reads_per_page = PAGE_SIZE / sizeof(*reads_buf); int total_reads_collected = 0; + int rl_size; ssize_t result = 0; + struct incfs_pending_read_info *reads_buf; + ssize_t reads_to_collect = len / sizeof(*reads_buf); + ssize_t reads_per_page = PAGE_SIZE / sizeof(*reads_buf); + + rl_size = READ_ONCE(mi->mi_log.rl_size); + if (rl_size == 0) + return 0; + reads_buf = (struct incfs_pending_read_info *)__get_free_page(GFP_NOFS); if (!reads_buf) return -ENOMEM; - reads_to_collect = min_t(size_t, mi->mi_log.rl_size, reads_to_collect); + reads_to_collect = min_t(ssize_t, rl_size, reads_to_collect); while (reads_to_collect > 0) { struct read_log_state next_state = READ_ONCE(log_state->state); int reads_collected = incfs_collect_logged_reads( mi, &next_state, reads_buf, - min_t(size_t, reads_to_collect, reads_per_page)); + min_t(ssize_t, reads_to_collect, reads_per_page)); if (reads_collected <= 0) { result = total_reads_collected ? total_reads_collected * @@ -635,7 +646,7 @@ static __poll_t log_poll(struct file *file, poll_table *wait) __poll_t ret = 0; poll_wait(file, &mi->mi_log.ml_notif_wq, wait); - count = incfs_get_uncollected_logs_count(mi, log_state->state); + count = incfs_get_uncollected_logs_count(mi, &log_state->state); if (count >= mi->mi_options.read_log_wakeup_count) ret = EPOLLIN | EPOLLRDNORM; @@ -853,7 +864,7 @@ static struct mem_range incfs_copy_signature_info_from_user(u8 __user *original, if (size > INCFS_MAX_SIGNATURE_SIZE) return range(ERR_PTR(-EFAULT), 0); - result = kzalloc(size, GFP_NOFS); + result = kzalloc(size, GFP_NOFS | __GFP_COMP); if (!result) return range(ERR_PTR(-ENOMEM), 0); @@ -885,7 +896,8 @@ static int init_new_file(struct mount_info *mi, struct dentry *dentry, .mnt = mi->mi_backing_dir_path.mnt, .dentry = dentry }; - new_file = dentry_open(&path, O_RDWR | O_NOATIME, mi->mi_owner); + new_file = dentry_open(&path, O_RDWR | O_NOATIME | O_LARGEFILE, + mi->mi_owner); if (IS_ERR(new_file)) { error = PTR_ERR(new_file); @@ -1274,7 +1286,7 @@ static long ioctl_fill_blocks(struct file *f, void __user *arg) { struct incfs_fill_blocks __user *usr_fill_blocks = arg; struct incfs_fill_blocks fill_blocks; - struct incfs_fill_block *usr_fill_block_array; + struct incfs_fill_block __user *usr_fill_block_array; struct data_file *df = get_incfs_data_file(f); const ssize_t data_buf_size = 2 * INCFS_DATA_FILE_BLOCK_SIZE; u8 *data_buf = NULL; @@ -1291,7 +1303,8 @@ static long ioctl_fill_blocks(struct file *f, void __user *arg) return -EFAULT; usr_fill_block_array = u64_to_user_ptr(fill_blocks.fill_blocks); - data_buf = (u8 *)__get_free_pages(GFP_NOFS, get_order(data_buf_size)); + data_buf = (u8 *)__get_free_pages(GFP_NOFS | __GFP_COMP, + get_order(data_buf_size)); if (!data_buf) 
return -ENOMEM; @@ -1344,7 +1357,7 @@ static long ioctl_permit_fill(struct file *f, void __user *arg) struct incfs_permit_fill __user *usr_permit_fill = arg; struct incfs_permit_fill permit_fill; long error = 0; - struct file *file = 0; + struct file *file = NULL; if (f->f_op != &incfs_pending_read_file_ops) return -EPERM; @@ -1406,7 +1419,7 @@ static long ioctl_read_file_signature(struct file *f, void __user *arg) if (sig_buf_size > INCFS_MAX_SIGNATURE_SIZE) return -E2BIG; - sig_buffer = kzalloc(sig_buf_size, GFP_NOFS); + sig_buffer = kzalloc(sig_buf_size, GFP_NOFS | __GFP_COMP); if (!sig_buffer) return -ENOMEM; @@ -1892,8 +1905,8 @@ static int file_open(struct inode *inode, struct file *file) int err = 0; get_incfs_backing_path(file->f_path.dentry, &backing_path); - backing_file = dentry_open(&backing_path, O_RDWR | O_NOATIME, - mi->mi_owner); + backing_file = dentry_open( + &backing_path, O_RDWR | O_NOATIME | O_LARGEFILE, mi->mi_owner); path_put(&backing_path); if (IS_ERR(backing_file)) { @@ -2023,6 +2036,45 @@ static void evict_inode(struct inode *inode) clear_inode(inode); } +static int incfs_setattr(struct dentry *dentry, struct iattr *ia) +{ + struct dentry_info *di = get_incfs_dentry(dentry); + struct dentry *backing_dentry; + struct inode *backing_inode; + int error; + + if (ia->ia_valid & ATTR_SIZE) + return -EINVAL; + + if (!di) + return -EINVAL; + backing_dentry = di->backing_path.dentry; + if (!backing_dentry) + return -EINVAL; + + backing_inode = d_inode(backing_dentry); + + /* incfs files are readonly, but the backing files must be writeable */ + if (S_ISREG(backing_inode->i_mode)) { + if ((ia->ia_valid & ATTR_MODE) && (ia->ia_mode & 0222)) + return -EINVAL; + + ia->ia_mode |= 0222; + } + + inode_lock(d_inode(backing_dentry)); + error = notify_change(backing_dentry, ia, NULL); + inode_unlock(d_inode(backing_dentry)); + + if (error) + return error; + + if (S_ISREG(backing_inode->i_mode)) + ia->ia_mode &= ~0222; + + return simple_setattr(dentry, ia); +} + static ssize_t incfs_getxattr(struct dentry *d, const char *name, void *value, size_t size) { diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 4200a6fe9599c72695df7d0ff4c0e17ce79bc696..97760cb9bcd75601901142cc116fffa7f35a3b58 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -992,9 +992,10 @@ void jbd2_journal_commit_transaction(journal_t *journal) * journalled data) we need to unmap buffer and clear * more bits. We also need to be careful about the check * because the data page mapping can get cleared under - * out hands, which alse need not to clear more bits - * because the page and buffers will be freed and can - * never be reused once we are done with them. + * our hands. Note that if mapping == NULL, we don't + * need to make buffer unmapped because the page is + * already detached from the mapping and buffers cannot + * get reused. 
*/ mapping = READ_ONCE(bh->b_page->mapping); if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) { diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c index 162f43b80c84c78e9f44679dffc80adcd1ba616c..2269e53e487144d8b4ff5879edd6c99967479346 100644 --- a/fs/kernfs/symlink.c +++ b/fs/kernfs/symlink.c @@ -54,6 +54,7 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent, kernfs_put(kn); return ERR_PTR(error); } +EXPORT_SYMBOL_GPL(kernfs_create_link); static int kernfs_get_target_path(struct kernfs_node *parent, struct kernfs_node *target, char *path) diff --git a/fs/namei.c b/fs/namei.c index 8492dc67fc3cddd33a1f5a2deaa686ede21d3c72..6e83cd85547376537488d4f8d0e4854ecdbf00be 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -465,7 +465,7 @@ int inode_permission2(struct vfsmount *mnt, struct inode *inode, int mask) retval = security_inode_permission(inode, mask); return retval; } -EXPORT_SYMBOL(inode_permission2); +EXPORT_SYMBOL_GPL(inode_permission2); int inode_permission(struct inode *inode, int mask) { @@ -2637,7 +2637,7 @@ struct dentry *lookup_one_len2(const char *name, struct vfsmount *mnt, struct de dentry = lookup_dcache(&this, base, 0); return dentry ? dentry : __lookup_slow(&this, base, 0); } -EXPORT_SYMBOL(lookup_one_len2); +EXPORT_SYMBOL_GPL(lookup_one_len2); struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) { @@ -3015,7 +3015,7 @@ int vfs_create2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, fsnotify_create(dir, dentry); return error; } -EXPORT_SYMBOL(vfs_create2); +EXPORT_SYMBOL_GPL(vfs_create2); int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool want_excl) @@ -3043,7 +3043,7 @@ int vfs_mkobj2(struct vfsmount *mnt, struct dentry *dentry, umode_t mode, fsnotify_create(dir, dentry); return error; } -EXPORT_SYMBOL(vfs_mkobj2); +EXPORT_SYMBOL_GPL(vfs_mkobj2); int vfs_mkobj(struct dentry *dentry, umode_t mode, @@ -3841,7 +3841,7 @@ int vfs_mknod2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, u fsnotify_create(dir, dentry); return error; } -EXPORT_SYMBOL(vfs_mknod2); +EXPORT_SYMBOL_GPL(vfs_mknod2); int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { @@ -3945,7 +3945,7 @@ int vfs_mkdir2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, u fsnotify_mkdir(dir, dentry); return error; } -EXPORT_SYMBOL(vfs_mkdir2); +EXPORT_SYMBOL_GPL(vfs_mkdir2); int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { @@ -4025,7 +4025,7 @@ int vfs_rmdir2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry) d_delete(dentry); return error; } -EXPORT_SYMBOL(vfs_rmdir2); +EXPORT_SYMBOL_GPL(vfs_rmdir2); int vfs_rmdir(struct inode *dir, struct dentry *dentry) { @@ -4153,7 +4153,7 @@ int vfs_unlink2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, return error; } -EXPORT_SYMBOL(vfs_unlink2); +EXPORT_SYMBOL_GPL(vfs_unlink2); int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode) { @@ -4273,7 +4273,7 @@ int vfs_symlink2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, fsnotify_create(dir, dentry); return error; } -EXPORT_SYMBOL(vfs_symlink2); +EXPORT_SYMBOL_GPL(vfs_symlink2); int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) { @@ -4401,7 +4401,7 @@ int vfs_link2(struct vfsmount *mnt, struct dentry *old_dentry, struct inode *dir fsnotify_link(dir, inode, new_dentry); return error; } -EXPORT_SYMBOL(vfs_link2); +EXPORT_SYMBOL_GPL(vfs_link2); int vfs_link(struct 
dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode) { @@ -4668,7 +4668,7 @@ int vfs_rename2(struct vfsmount *mnt, return error; } -EXPORT_SYMBOL(vfs_rename2); +EXPORT_SYMBOL_GPL(vfs_rename2); int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, diff --git a/fs/namespace.c b/fs/namespace.c index 3347c1d1decfc137df2806a09b42ab0cb1b5f0d2..92bd0567840808c6e8d436692862711f81fe5b6a 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -3166,8 +3166,8 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, /* make certain new is below the root */ if (!is_path_reachable(new_mnt, new.dentry, &root)) goto out4; - root_mp->m_count++; /* pin it so it won't go away */ lock_mount_hash(); + root_mp->m_count++; /* pin it so it won't go away */ detach_mnt(new_mnt, &parent_path); detach_mnt(root_mnt, &root_parent); if (root_mnt->mnt.mnt_flags & MNT_LOCKED) { diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 3159673549540f063124d62a6cc7098f86ec6ae8..bcc51f131a49614c2590fea6bdc0faa3c09b11b7 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -130,6 +130,8 @@ static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp, list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { list_for_each_entry(lo, &server->layouts, plh_layouts) { + if (!pnfs_layout_is_valid(lo)) + continue; if (stateid != NULL && !nfs4_stateid_match_other(stateid, &lo->plh_stateid)) continue; diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index c61bd3fc723ee2ce295d7eea6aeb00e148986736..e5da9d7fb69e9ba9f75881c2782b89629a178c0f 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -600,6 +600,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) l_ctx = nfs_get_lock_context(dreq->ctx); if (IS_ERR(l_ctx)) { result = PTR_ERR(l_ctx); + nfs_direct_req_release(dreq); goto out_release; } dreq->l_ctx = l_ctx; @@ -1023,6 +1024,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) l_ctx = nfs_get_lock_context(dreq->ctx); if (IS_ERR(l_ctx)) { result = PTR_ERR(l_ctx); + nfs_direct_req_release(dreq); goto out_release; } dreq->l_ctx = l_ctx; diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index 9fce18548f7e848be7fa395766fb2c488dd724af..24d39cce8ba496cf6fe8a64a8a812dd4ddbe401d 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c @@ -255,37 +255,45 @@ int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type) { - struct posix_acl *alloc = NULL, *dfacl = NULL; + struct posix_acl *orig = acl, *dfacl = NULL, *alloc; int status; if (S_ISDIR(inode->i_mode)) { switch(type) { case ACL_TYPE_ACCESS: - alloc = dfacl = get_acl(inode, ACL_TYPE_DEFAULT); + alloc = get_acl(inode, ACL_TYPE_DEFAULT); if (IS_ERR(alloc)) goto fail; + dfacl = alloc; break; case ACL_TYPE_DEFAULT: - dfacl = acl; - alloc = acl = get_acl(inode, ACL_TYPE_ACCESS); + alloc = get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(alloc)) goto fail; + dfacl = acl; + acl = alloc; break; } } if (acl == NULL) { - alloc = acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); + alloc = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); if (IS_ERR(alloc)) goto fail; + acl = alloc; } status = __nfs3_proc_setacls(inode, acl, dfacl); - posix_acl_release(alloc); +out: + if (acl != orig) + posix_acl_release(acl); + if (dfacl != orig) + posix_acl_release(dfacl); return status; fail: - return PTR_ERR(alloc); + status = PTR_ERR(alloc); 
+ goto out; } const struct xattr_handler *nfs3_xattr_handlers[] = { diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 9cf59e2622f8e284d6cc85742eb0dc2ecd929ef5..5dae7c85d9b6ea680b0f3b24a286ca83fc5f7bd4 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -865,15 +865,6 @@ static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio, pgio->pg_mirror_count = mirror_count; } -/* - * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1) - */ -void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio) -{ - pgio->pg_mirror_count = 1; - pgio->pg_mirror_idx = 0; -} - static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio) { pgio->pg_mirror_count = 1; @@ -1302,6 +1293,14 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index) } } +/* + * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1) + */ +void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio) +{ + nfs_pageio_complete(pgio); +} + int __init nfs_init_nfspagecache(void) { nfs_page_cachep = kmem_cache_create("nfs_page", diff --git a/fs/nfs/write.c b/fs/nfs/write.c index ce1da8cbac0038ba41b014f2f2bd0c126b15a6bc..63d20308a9bb7e8b0e1b71104249d749e35b4a4d 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -432,6 +432,7 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, } subreq->wb_head = subreq; + nfs_release_request(old_head); if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { nfs_release_request(subreq); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index ed73e86194fac4ff1de08a62ecf18d6d5ea125d6..c24306af9758f38e70f9e74764b15547e184c83d 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -252,6 +252,8 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh, if (!nbl) { nbl= kmalloc(sizeof(*nbl), GFP_KERNEL); if (nbl) { + INIT_LIST_HEAD(&nbl->nbl_list); + INIT_LIST_HEAD(&nbl->nbl_lru); fh_copy_shallow(&nbl->nbl_fh, fh); locks_init_lock(&nbl->nbl_lock); nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client, diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index a342f008e42f9e795bedd6312ee50dedd9c99783..ff0e083ce2a1a957bd24c38b15963a5bd0c9d123 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -7403,6 +7403,10 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_inline_data *idata = &di->id2.i_data; + /* No need to punch hole beyond i_size. */ + if (start >= i_size_read(inode)) + return 0; + if (end > i_size_read(inode)) end = i_size_read(inode); diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 595ea0d83dd4e4c2a17dced1f091b934c98bb704..7fe9bbb2045d37025f4beab408b60f8691e933dd 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -899,7 +899,7 @@ struct inode *ovl_get_inode(struct super_block *sb, struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL; bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry, oip->index); - int fsid = bylower ? oip->lowerpath->layer->fsid : 0; + int fsid = bylower ? lowerpath->layer->fsid : 0; bool is_dir, metacopy = false; unsigned long ino = 0; int err = oip->newinode ? 
-EEXIST : -ENOMEM; @@ -949,6 +949,8 @@ struct inode *ovl_get_inode(struct super_block *sb, err = -ENOMEM; goto out_err; } + ino = realinode->i_ino; + fsid = lowerpath->layer->fsid; } ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev, ino, fsid); ovl_inode_init(inode, upperdentry, lowerdentry, oip->lowerdata); diff --git a/fs/pnode.c b/fs/pnode.c index 681916df422c777a8743608623258d1ffecbe501..1fc2e47d130827a3897d5309abcfbbd79ef77b34 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -266,14 +266,13 @@ static int propagate_one(struct mount *m) if (IS_ERR(child)) return PTR_ERR(child); child->mnt.mnt_flags &= ~MNT_LOCKED; + read_seqlock_excl(&mount_lock); mnt_set_mountpoint(m, mp, child); + if (m->mnt_master != dest_master) + SET_MNT_MARK(m->mnt_master); + read_sequnlock_excl(&mount_lock); last_dest = m; last_source = child; - if (m->mnt_master != dest_master) { - read_seqlock_excl(&mount_lock); - SET_MNT_MARK(m->mnt_master); - read_sequnlock_excl(&mount_lock); - } hlist_add_head(&child->mnt_hash, list); return count_mounts(m->mnt_ns, child); } diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 5c5f161763c8c592165049c974cb7016cc6e4168..c4147e50af98a94290305aa4b8ac7cfe46c21cac 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -250,7 +250,8 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst, if (start < offset + dump->size) { tsz = min(offset + (u64)dump->size - start, (u64)size); buf = dump->buf + start - offset; - if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) { + if (remap_vmalloc_range_partial(vma, dst, buf, 0, + tsz)) { ret = -EFAULT; goto out_unlock; } @@ -607,7 +608,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size); kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz; if (remap_vmalloc_range_partial(vma, vma->vm_start + len, - kaddr, tsz)) + kaddr, 0, tsz)) goto fail; size -= tsz; diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c index 6f90d91a8733ad68aa94d71b4caba623cdee0409..6f51c6d7b965a929f111a5408c0d940b6218533d 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -99,11 +99,11 @@ static void *pstore_ftrace_seq_next(struct seq_file *s, void *v, loff_t *pos) struct pstore_private *ps = s->private; struct pstore_ftrace_seq_data *data = v; + (*pos)++; data->off += REC_SIZE; if (data->off + REC_SIZE > ps->total_size) return NULL; - (*pos)++; return data; } @@ -113,6 +113,9 @@ static int pstore_ftrace_seq_show(struct seq_file *s, void *v) struct pstore_ftrace_seq_data *data = v; struct pstore_ftrace_record *rec; + if (!data) + return 0; + rec = (struct pstore_ftrace_record *)(ps->record->buf + data->off); seq_printf(s, "CPU:%d ts:%llu %08lx %08lx %pf <- %pF\n", diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 4bae3f4fe829dbb64810be3ccb148eb4d8b40dcb..dcd9c3163587c120b6f9fc57add528f0d3d2f88f 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -802,9 +802,9 @@ static int __init pstore_init(void) ret = pstore_init_fs(); if (ret) - return ret; + free_buf_for_compression(); - return 0; + return ret; } late_initcall(pstore_init); diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 245483cc282ba0032f5f0a16649d5b7dbd45cffb..901f27ac94abc0c16a6a4065478e0a6a3791b551 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -902,7 +902,12 @@ xfs_eofblocks_worker( { struct xfs_mount *mp = container_of(to_delayed_work(work), struct xfs_mount, m_eofblocks_work); + + if (!sb_start_write_trylock(mp->m_super)) + 
return; xfs_icache_free_eofblocks(mp, NULL); + sb_end_write(mp->m_super); + xfs_queue_eofblocks(mp); } @@ -929,7 +934,12 @@ xfs_cowblocks_worker( { struct xfs_mount *mp = container_of(to_delayed_work(work), struct xfs_mount, m_cowblocks_work); + + if (!sb_start_write_trylock(mp->m_super)) + return; xfs_icache_free_cowblocks(mp, NULL); + sb_end_write(mp->m_super); + xfs_queue_cowblocks(mp); } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 5ed84d6c70597f02e0accd3bc907e1773d1e2293..f2d06e1e49066614d44613d0e46dec786d3077e7 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2949,7 +2949,8 @@ xfs_rename( spaceres); /* - * Set up the target. + * Check for expected errors before we dirty the transaction + * so we can return an error without a transaction abort. */ if (target_ip == NULL) { /* @@ -2961,6 +2962,46 @@ xfs_rename( if (error) goto out_trans_cancel; } + } else { + /* + * If target exists and it's a directory, check that whether + * it can be destroyed. + */ + if (S_ISDIR(VFS_I(target_ip)->i_mode) && + (!xfs_dir_isempty(target_ip) || + (VFS_I(target_ip)->i_nlink > 2))) { + error = -EEXIST; + goto out_trans_cancel; + } + } + + /* + * Directory entry creation below may acquire the AGF. Remove + * the whiteout from the unlinked list first to preserve correct + * AGI/AGF locking order. This dirties the transaction so failures + * after this point will abort and log recovery will clean up the + * mess. + * + * For whiteouts, we need to bump the link count on the whiteout + * inode. After this point, we have a real link, clear the tmpfile + * state flag from the inode so it doesn't accidentally get misused + * in future. + */ + if (wip) { + ASSERT(VFS_I(wip)->i_nlink == 0); + error = xfs_iunlink_remove(tp, wip); + if (error) + goto out_trans_cancel; + + xfs_bumplink(tp, wip); + xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE); + VFS_I(wip)->i_state &= ~I_LINKABLE; + } + + /* + * Set up the target. + */ + if (target_ip == NULL) { /* * If target does not exist and the rename crosses * directories, adjust the target directory link count @@ -2980,22 +3021,6 @@ xfs_rename( goto out_trans_cancel; } } else { /* target_ip != NULL */ - /* - * If target exists and it's a directory, check that both - * target and source are directories and that target can be - * destroyed, or that neither is a directory. - */ - if (S_ISDIR(VFS_I(target_ip)->i_mode)) { - /* - * Make sure target dir is empty. - */ - if (!(xfs_dir_isempty(target_ip)) || - (VFS_I(target_ip)->i_nlink > 2)) { - error = -EEXIST; - goto out_trans_cancel; - } - } - /* * Link the source inode under the target name. * If the source inode is a directory and we are moving @@ -3086,32 +3111,6 @@ xfs_rename( if (error) goto out_trans_cancel; - /* - * For whiteouts, we need to bump the link count on the whiteout inode. - * This means that failures all the way up to this point leave the inode - * on the unlinked list and so cleanup is a simple matter of dropping - * the remaining reference to it. If we fail here after bumping the link - * count, we're shutting down the filesystem so we'll never see the - * intermediate state on disk. - */ - if (wip) { - ASSERT(VFS_I(wip)->i_nlink == 0); - error = xfs_bumplink(tp, wip); - if (error) - goto out_trans_cancel; - error = xfs_iunlink_remove(tp, wip); - if (error) - goto out_trans_cancel; - xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE); - - /* - * Now we have a real link, clear the "I'm a tmpfile" state - * flag from the inode so it doesn't accidentally get misused in - * future. 
- */ - VFS_I(wip)->i_state &= ~I_LINKABLE; - } - xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); if (new_parent) diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index bad90479ade2a4c13db8bc0404597f97324a87d0..6ffb53edf1b4a1b5eea3cedb33e61561348c4e5d 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -2182,7 +2182,10 @@ xfs_file_ioctl( if (error) return error; - return xfs_icache_free_eofblocks(mp, &keofb); + sb_start_write(mp->m_super); + error = xfs_icache_free_eofblocks(mp, &keofb); + sb_end_write(mp->m_super); + return error; } default: diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index f3c393f309e19c8b99e6f2c683c84766efa6c1a8..6622652a85a809ed86f7b764c74f26847c7dfcec 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -1058,6 +1058,7 @@ xfs_reflink_remap_extent( uirec.br_startblock = irec->br_startblock + rlen; uirec.br_startoff = irec->br_startoff + rlen; uirec.br_blockcount = unmap_len - rlen; + uirec.br_state = irec->br_state; unmap_len = rlen; /* If this isn't a real mapping, we're done. */ diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index d3a4e89bf4a0ddb916ed4f5d395285e2e2188869..66f167aefd94f8b428ef598523df36c13b59a245 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -520,8 +520,9 @@ xfsaild( { struct xfs_ail *ailp = data; long tout = 0; /* milliseconds */ + unsigned int noreclaim_flag; - current->flags |= PF_MEMALLOC; + noreclaim_flag = memalloc_noreclaim_save(); set_freezable(); while (1) { @@ -592,6 +593,7 @@ xfsaild( tout = xfsaild_push(ailp); } + memalloc_noreclaim_restore(noreclaim_flag); return 0; } diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 1194a4c78d557fb411e9672291f6bab6e3623e9d..5b9eab15a1e6c213020338d0eb16e8212d729945 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -293,6 +293,14 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx } #endif +static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg, + bool direct) +{ + if (direct || (is_percpu_thread() && cpu == smp_processor_id())) + return fn(arg); + return work_on_cpu(cpu, fn, arg); +} + /* in processor_perflib.c */ #ifdef CONFIG_CPU_FREQ diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h new file mode 100644 index 0000000000000000000000000000000000000000..e94b19782c92ff4f79ff75964a61b3b207fdda94 --- /dev/null +++ b/include/asm-generic/vdso/vsyscall.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_VSYSCALL_H +#define __ASM_GENERIC_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#ifndef __arch_get_k_vdso_data +static __always_inline struct vdso_data *__arch_get_k_vdso_data(void) +{ + return NULL; +} +#endif /* __arch_get_k_vdso_data */ + +#ifndef __arch_update_vdso_data +static __always_inline int __arch_update_vdso_data(void) +{ + return 0; +} +#endif /* __arch_update_vdso_data */ + +#ifndef __arch_get_clock_mode +static __always_inline int __arch_get_clock_mode(struct timekeeper *tk) +{ + return 0; +} +#endif /* __arch_get_clock_mode */ + +#ifndef __arch_use_vsyscall +static __always_inline int __arch_use_vsyscall(struct vdso_data *vdata) +{ + return 1; +} +#endif /* __arch_use_vsyscall */ + +#ifndef __arch_update_vsyscall +static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata, + struct timekeeper *tk) +{ +} +#endif /* __arch_update_vsyscall */ + +#ifndef __arch_sync_vdso_data +static __always_inline void 
__arch_sync_vdso_data(struct vdso_data *vdata) +{ +} +#endif /* __arch_sync_vdso_data */ + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_GENERIC_VSYSCALL_H */ diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 4d58966b541b4d170313c775fe73c243a309cd58..a3c02470cc390b6b0eaba4fc47b7e5165eeec052 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -39,6 +39,7 @@ struct drm_encoder; struct drm_property; struct drm_property_blob; struct drm_printer; +struct drm_panel; struct edid; enum drm_connector_force { @@ -205,6 +206,40 @@ enum drm_panel_orientation { DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +/* + * This is a consolidated colorimetry list supported by HDMI and + * DP protocol standard. The respective connectors will register + * a property with the subset of this list (supported by that + * respective protocol). Userspace will set the colorspace through + * a colorspace property which will be created and exposed to + * userspace. + */ + +/* For Default case, driver will set the colorspace */ +#define DRM_MODE_COLORIMETRY_DEFAULT 0 +/* CEA 861 Normal Colorimetry options */ +#define DRM_MODE_COLORIMETRY_NO_DATA 0 +#define DRM_MODE_COLORIMETRY_SMPTE_170M_YCC 1 +#define DRM_MODE_COLORIMETRY_BT709_YCC 2 +/* CEA 861 Extended Colorimetry Options */ +#define DRM_MODE_COLORIMETRY_XVYCC_601 3 +#define DRM_MODE_COLORIMETRY_XVYCC_709 4 +#define DRM_MODE_COLORIMETRY_SYCC_601 5 +#define DRM_MODE_COLORIMETRY_OPYCC_601 6 +#define DRM_MODE_COLORIMETRY_OPRGB 7 +#define DRM_MODE_COLORIMETRY_BT2020_CYCC 8 +#define DRM_MODE_COLORIMETRY_BT2020_RGB 9 +#define DRM_MODE_COLORIMETRY_BT2020_YCC 10 +/* Additional Colorimetry extension added as part of CTA 861.G */ +#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 11 +#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER 12 +/* DP MSA Colorimetry Options */ +#define DRM_MODE_DP_COLORIMETRY_BT601_YCC 13 +#define DRM_MODE_DP_COLORIMETRY_BT709_YCC 14 +#define DRM_MODE_DP_COLORIMETRY_SRGB 15 +#define DRM_MODE_DP_COLORIMETRY_RGB_WIDE_GAMUT 16 +#define DRM_MODE_DP_COLORIMETRY_SCRGB 17 + /** * struct drm_display_info - runtime data about the connected sink * @@ -448,6 +483,13 @@ struct drm_connector_state { */ unsigned int content_protection; + /** + * @colorspace: State variable for Connector property to request + * colorspace change on Sink. This is most commonly used to switch + * to wider color gamuts like BT2020. + */ + u32 colorspace; + /** * @writeback_job: Writeback job for writeback connectors * @@ -915,6 +957,12 @@ struct drm_connector { */ struct drm_property *content_protection_property; + /** + * @colorspace_property: Connector property to set the suitable + * colorspace supported by the sink. 
+ */ + struct drm_property *colorspace_property; + /** * @path_blob_ptr: * @@ -1007,6 +1055,54 @@ struct drm_connector { /** @bad_edid_counter: track sinks that give us an EDID with invalid checksum */ unsigned bad_edid_counter; + /* + * @pt_scan_info: PT scan info obtained from the VCDB of EDID + * @it_scan_info: IT scan info obtained from the VCDB of EDID + * @ce_scan_info: CE scan info obtained from the VCDB of EDID + * @color_enc_fmt: Colorimetry encoding formats of sink + * @hdr_eotf: Electro optical transfer function obtained from HDR block + * @hdr_metadata_type_one: Metadata type one obtained from HDR block + * @hdr_max_luminance: desired max luminance obtained from HDR block + * @hdr_avg_luminance: desired avg luminance obtained from HDR block + * @hdr_min_luminance: desired min luminance obtained from HDR block + * @hdr_supported: does the sink support HDR content + * @max_tmds_char: indicates the maximum TMDS Character Rate supported + * @scdc_present: when set the sink supports SCDC functionality + * @rr_capable: when set the sink is capable of initiating an + * SCDC read request + * @supports_scramble: when set the sink supports less than + * 340Mcsc scrambling + * @flags_3d: 3D view(s) supported by the sink, see drm_edid.h + * DRM_EDID_3D_*) + */ + u8 pt_scan_info; + u8 it_scan_info; + u8 ce_scan_info; + u32 color_enc_fmt; + u32 hdr_eotf; + bool hdr_metadata_type_one; + u32 hdr_max_luminance; + u32 hdr_avg_luminance; + u32 hdr_min_luminance; + bool hdr_supported; + u8 hdr_plus_app_ver; + + /* EDID bits HDMI 2.0 + * @max_tmds_char: indicates the maximum TMDS Character Rate supported + * @scdc_present: when set the sink supports SCDC functionality + * @rr_capable: when set the sink is capable of initiating an + * SCDC read request + * @supports_scramble: when set the sink supports less than + * 340Mcsc scrambling + * @flags_3d: 3D view(s) supported by the sink, see drm_edid.h + * (DRM_EDID_3D_*) + */ + int max_tmds_char; /* in Mcsc */ + bool scdc_present; + bool rr_capable; + bool supports_scramble; + int flags_3d; + /** * @edid_corrupt: Indicates whether the last read EDID was corrupt. Used * in Displayport compliance testing - Displayport Link CTS Core 1.2 @@ -1075,6 +1171,20 @@ struct drm_connector { * &drm_mode_config.connector_free_work. */ struct llist_node free_node; + + /** + * @panel: + * + * Can find the panel which connected to drm_connector. + */ + struct drm_panel *panel; + + /** + * @checksum: + * + * The calculated checksum value of first 127 bytes of associated EDID. 
+ */ + u8 checksum; }; #define obj_to_connector(x) container_of(x, struct drm_connector, base) @@ -1186,6 +1296,7 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, int drm_connector_attach_content_protection_property( struct drm_connector *connector); int drm_mode_create_aspect_ratio_property(struct drm_device *dev); +int drm_mode_create_colorspace_property(struct drm_connector *connector); int drm_mode_create_content_type_property(struct drm_device *dev); void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame, const struct drm_connector_state *conn_state); diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 05cc31b5db161070734a65aacd7d45fad5b43884..c1078e5f9ca2e6638087a3c37de429366aa57d8e 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -627,6 +627,14 @@ # define DP_TEST_COUNT_MASK 0xf #define DP_TEST_PHY_PATTERN 0x248 +# define DP_TEST_PHY_PATTERN_NONE 0x0 +# define DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING 0x1 +# define DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT 0x2 +# define DP_TEST_PHY_PATTERN_PRBS7 0x3 +# define DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN 0x4 +# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_1 0x5 +# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_2 0x6 +# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_3 0x7 #define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250 #define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251 #define DP_TEST_80BIT_CUSTOM_PATTERN_23_16 0x252 @@ -1143,6 +1151,7 @@ struct drm_dp_aux { struct device *dev; struct drm_crtc *crtc; struct mutex hw_mutex; + struct mutex i2c_mutex; struct work_struct crc_work; u8 crc_count; ssize_t (*transfer)(struct drm_dp_aux *aux, diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 0f7439f0bb2f7c0f8c282cdebe01378b0cf33e1f..864b2f8d886a2fea6c82e495eab7e9a4b2213ef5 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -27,6 +27,7 @@ #include struct drm_dp_mst_branch; +struct drm_dp_mst_port; /** * struct drm_dp_vcpi - Virtual Channel Payload Identifier @@ -42,6 +43,18 @@ struct drm_dp_vcpi { int num_slots; }; +struct drm_dp_mst_dsc_dpcd_cache { + bool valid; + bool use_parent_dpcd; + u8 dsc_dpcd[16]; +}; + +struct drm_dp_mst_dsc_info { + bool dsc_support; + struct drm_dp_mst_port *dsc_port; + struct drm_dp_mst_dsc_dpcd_cache dsc_dpcd_cache; +}; + /** * struct drm_dp_mst_port - MST port * @kref: reference count for this port. @@ -98,6 +111,10 @@ struct drm_dp_mst_port { * audio-capable. 
*/ bool has_audio; + + bool fec_capable; + + struct drm_dp_mst_dsc_info dsc_info; }; /** @@ -291,6 +308,7 @@ struct drm_dp_port_number_req { struct drm_dp_enum_path_resources_ack_reply { u8 port_number; + bool fec_capable; u16 full_payload_bw_number; u16 avail_payload_bw_number; }; @@ -588,6 +606,10 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + +bool drm_dp_mst_has_fec(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); @@ -634,4 +656,24 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, bool power_up); +int drm_dp_mst_get_dsc_info(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_mst_dsc_info *dsc_info); + +int drm_dp_mst_update_dsc_info(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_mst_dsc_info *dsc_info); + +int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int offset, int size, u8 *bytes); + +int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int offset, int size, u8 *bytes); + +int drm_dp_mst_get_max_sdp_streams_supported( + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + #endif diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 152b3055e9e1feb449b682c8102df23d18a64df5..12c76a5b546ccb04da0fb2fbba1b4a23d332be63 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -29,6 +29,7 @@ #include #include +#include #include @@ -496,6 +497,15 @@ struct drm_driver { int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); + /** + * @gem_prime_get_uuid + * + * get_uuid hook for GEM drivers. Retrieves the virtio uuid of the + * given GEM buffer. 
+ */ + int (*gem_prime_get_uuid)(struct drm_gem_object *obj, + uuid_t *uuid); + /** * @dumb_create: * diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 53be104aab5ce46e77b0a77fcc73e9c5d6a44d67..7e39d599e12cf329c04bc5543f6de64070ee8fb9 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -221,6 +221,16 @@ struct detailed_timing { DRM_EDID_YCBCR420_DC_36 | \ DRM_EDID_YCBCR420_DC_30) +#define DRM_EDID_CLRMETRY_xvYCC_601 (1 << 0) +#define DRM_EDID_CLRMETRY_xvYCC_709 (1 << 1) +#define DRM_EDID_CLRMETRY_sYCC_601 (1 << 2) +#define DRM_EDID_CLRMETRY_ADBYCC_601 (1 << 3) +#define DRM_EDID_CLRMETRY_ADB_RGB (1 << 4) +#define DRM_EDID_CLRMETRY_BT2020_CYCC (1 << 5) +#define DRM_EDID_CLRMETRY_BT2020_YCC (1 << 6) +#define DRM_EDID_CLRMETRY_BT2020_RGB (1 << 7) +#define DRM_EDID_CLRMETRY_DCI_P3 (1 << 15) + /* ELD Header Block */ #define DRM_ELD_HEADER_BLOCK_SIZE 4 @@ -279,6 +289,11 @@ struct detailed_timing { #define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) +/* HDMI 2.0 */ +#define DRM_EDID_3D_INDEPENDENT_VIEW (1 << 2) +#define DRM_EDID_3D_DUAL_VIEW (1 << 1) +#define DRM_EDID_3D_OSD_DISPARITY (1 << 0) + struct edid { u8 header[8]; /* Vendor & product info */ diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 4fef19064b0f12cad6b70614112493fd54e51729..b208412dd52dec9b8db775bf6eb460572d104dbb 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -21,12 +21,18 @@ struct mipi_dsi_device; #define MIPI_DSI_MSG_REQ_ACK BIT(0) /* use Low Power Mode to transmit message */ #define MIPI_DSI_MSG_USE_LPM BIT(1) +/* read mipi_dsi_msg.ctrl and unicast to only that ctrls */ +#define MIPI_DSI_MSG_UNICAST BIT(2) +/* Stack all commands until lastcommand bit and trigger all in one go */ +#define MIPI_DSI_MSG_LASTCOMMAND BIT(3) /** * struct mipi_dsi_msg - read/write DSI buffer * @channel: virtual channel id * @type: payload data type * @flags: flags controlling this message transmission + * @ctrl: ctrl index to transmit on + * @wait_ms: duration in ms to wait after message transmission * @tx_len: length of @tx_buf * @tx_buf: data to be written * @rx_len: length of @rx_buf @@ -36,6 +42,8 @@ struct mipi_dsi_msg { u8 channel; u8 type; u16 flags; + u32 ctrl; + u32 wait_ms; size_t tx_len; const void *tx_buf; @@ -134,6 +142,10 @@ struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node); #define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10) /* transmit data in low power */ #define MIPI_DSI_MODE_LPM BIT(11) +/* disable BLLP area */ +#define MIPI_DSI_MODE_VIDEO_BLLP BIT(12) +/* disable EOF BLLP area */ +#define MIPI_DSI_MODE_VIDEO_EOF_BLLP BIT(13) enum mipi_dsi_pixel_format { MIPI_DSI_FMT_RGB888, @@ -271,9 +283,9 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format); int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline); int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi, - u16 brightness); + u16 brightness, size_t num_params); int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi, - u16 *brightness); + u16 *brightness, size_t num_params); /** * struct mipi_dsi_driver - DSI driver diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h index c34a3e8030e12c1cd2957c55780a11004edf967d..6292fa663844463d9d9e89044dad0e7dc198e14c 100644 --- a/include/drm/drm_mode_object.h +++ b/include/drm/drm_mode_object.h @@ -60,7 +60,7 @@ struct drm_mode_object { void (*free_cb)(struct kref *kref); }; 
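Editor's note on the drm_mipi_dsi.h hunk above: it adds per-message ctrl and wait_ms fields, the MIPI_DSI_MSG_UNICAST/LASTCOMMAND flags, and widens the DCS brightness helpers with a num_params argument. A minimal sketch of how a caller might adapt follows, assuming num_params is the number of DCS parameter bytes (2 for a panel with a 16-bit brightness register, 1 for the classic 8-bit form); the mypanel_set_brightness() function and its panel are invented purely for illustration and are not part of this patch.

#include "drm/drm_mipi_dsi.h"	/* normally an angle-bracket include */

/* Hypothetical panel callback; only the two helper calls reflect the
 * prototypes changed in the hunk above. */
static int mypanel_set_brightness(struct mipi_dsi_device *dsi, u16 level)
{
	int ret;

	/* Two DCS parameter bytes for a 16-bit brightness register. */
	ret = mipi_dsi_dcs_set_display_brightness(dsi, level, 2);
	if (ret < 0)
		return ret;

	/* Read the value back; an 8-bit panel would pass 1 instead. */
	return mipi_dsi_dcs_get_display_brightness(dsi, &level, 2);
}

Because both prototypes change together, any caller of the old two-argument form would need the extra argument added alongside this header change.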
-#define DRM_OBJECT_MAX_PROPERTY 24 +#define DRM_OBJECT_MAX_PROPERTY 64 /** * struct drm_object_properties - property tracking for &drm_mode_object */ diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 675aa1e876ce6f633cb55c910940fa54e40f2a90..6907d319ff19e1884d887954e89755dded98ba6f 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -27,6 +27,25 @@ #include #include #include +#include + +/* A hardware display blank change occurred */ +#define DRM_PANEL_EVENT_BLANK 0x01 +/* A hardware display blank early change occurred */ +#define DRM_PANEL_EARLY_EVENT_BLANK 0x02 + +enum { + /* panel: power on */ + DRM_PANEL_BLANK_UNBLANK, + /* panel: power off */ + DRM_PANEL_BLANK_POWERDOWN, +}; + +struct drm_panel_notifier { + int refresh_rate; + void *data; + uint32_t id; +}; struct device_node; struct drm_connector; @@ -94,6 +113,13 @@ struct drm_panel { const struct drm_panel_funcs *funcs; struct list_head list; + + /** + * @nh: + * + * panel notifier list head + */ + struct blocking_notifier_head nh; }; /** @@ -195,6 +221,13 @@ void drm_panel_remove(struct drm_panel *panel); int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector); int drm_panel_detach(struct drm_panel *panel); +int drm_panel_notifier_register(struct drm_panel *panel, + struct notifier_block *nb); +int drm_panel_notifier_unregister(struct drm_panel *panel, + struct notifier_block *nb); +int drm_panel_notifier_call_chain(struct drm_panel *panel, + unsigned long val, void *v); + #if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL) struct drm_panel *of_drm_find_panel(const struct device_node *np); #else diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h index 0c86d755cad93bfaf2e6c04b2f4ce2f26aad4c3a..a6f60d529b8cadabade69d9cc7605b5c5ff9a32f 100644 --- a/include/drm/drm_prime.h +++ b/include/drm/drm_prime.h @@ -104,5 +104,6 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages); void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); +int drm_gem_dmabuf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid); #endif /* __DRM_PRIME_H__ */ diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h index e0970a578188ccaf563340f115e2aaeadb65267e..a7207a965466322c20ab9dfe9e19698f6072599f 100644 --- a/include/keys/big_key-type.h +++ b/include/keys/big_key-type.h @@ -21,6 +21,6 @@ extern void big_key_free_preparse(struct key_preparsed_payload *prep); extern void big_key_revoke(struct key *key); extern void big_key_destroy(struct key *key); extern void big_key_describe(const struct key *big_key, struct seq_file *m); -extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen); +extern long big_key_read(const struct key *key, char *buffer, size_t buflen); #endif /* _KEYS_BIG_KEY_TYPE_H */ diff --git a/include/keys/user-type.h b/include/keys/user-type.h index 12babe9915944f84a040199451124763d15d4bd0..0d8f3cd3056f01ef74acdf9e63c80d16755607f3 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -45,8 +45,7 @@ extern int user_update(struct key *key, struct key_preparsed_payload *prep); extern void user_revoke(struct key *key); extern void user_destroy(struct key *key); extern void user_describe(const struct key *user, struct seq_file *m); -extern long user_read(const struct key *key, - char __user *buffer, size_t buflen); +extern long user_read(const struct key *key, char *buffer, size_t buflen); static 
inline const struct user_key_payload *user_key_payload_rcu(const struct key *key) { diff --git a/include/linux/android_kabi.h b/include/linux/android_kabi.h new file mode 100644 index 0000000000000000000000000000000000000000..efa52953fe35148ff519f385e4b95ba1f7f7ab96 --- /dev/null +++ b/include/linux/android_kabi.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * android_kabi.h - Android kernel abi abstraction header + * + * Copyright (C) 2020 Google, Inc. + * + * Heavily influenced by rh_kabi.h which came from the RHEL/CENTOS kernel and + * was: + * Copyright (c) 2014 Don Zickus + * Copyright (c) 2015-2018 Jiri Benc + * Copyright (c) 2015 Sabrina Dubroca, Hannes Frederic Sowa + * Copyright (c) 2016-2018 Prarit Bhargava + * Copyright (c) 2017 Paolo Abeni, Larry Woodman + * + * These macros are to be used to try to help alleviate future kernel abi + * changes that will occur as LTS and other kernel patches are merged into the + * tree during a period in which the kernel abi is wishing to not be disturbed. + * + * There are two times these macros should be used: + * - Before the kernel abi is "frozen" + * Padding can be added to various kernel structures that have in the past + * been known to change over time. That will give "room" in the structure + * that can then be used when fields are added so that the structure size + * will not change. + * + * - After the kernel abi is "frozen" + * If a structure's field is changed to a type that is identical in size to + * the previous type, it can be changed with a union macro + * If a field is added to a structure, the padding fields can be used to add + * the new field in a "safe" way. + */ +#ifndef _ANDROID_KABI_H +#define _ANDROID_KABI_H + +#include + +/* + * Worker macros, don't use these, use the ones without a leading '_' + */ + +#define __ANDROID_KABI_CHECK_SIZE_ALIGN(_orig, _new) \ + union { \ + _Static_assert(sizeof(struct{_new;}) <= sizeof(struct{_orig;}), \ + __FILE__ ":" __stringify(__LINE__) ": " \ + __stringify(_new) \ + " is larger than " \ + __stringify(_orig) ); \ + _Static_assert(__alignof__(struct{_new;}) <= __alignof__(struct{_orig;}), \ + __FILE__ ":" __stringify(__LINE__) ": " \ + __stringify(_orig) \ + " is not aligned the same as " \ + __stringify(_new) ); \ + } + +#ifdef __GENKSYMS__ + +#define _ANDROID_KABI_REPLACE(_orig, _new) _orig + +#else + +#define _ANDROID_KABI_REPLACE(_orig, _new) \ + union { \ + _new; \ + struct { \ + _orig; \ + } __UNIQUE_ID(android_kabi_hide); \ + __ANDROID_KABI_CHECK_SIZE_ALIGN(_orig, _new); \ + } + +#endif /* __GENKSYMS__ */ + +#define _ANDROID_KABI_RESERVE(n) u64 android_kabi_reserved##n + + +/* + * Macros to use _before_ the ABI is frozen + */ + +/* + * ANDROID_KABI_RESERVE + * Reserve some "padding" in a structure for potential future use. + * This normally placed at the end of a structure. + * number: the "number" of the padding variable in the structure. Start with + * 1 and go up. 
+ */ +#define ANDROID_KABI_RESERVE(number) _ANDROID_KABI_RESERVE(number) + + +/* + * Macros to use _after_ the ABI is frozen + */ + +/* + * ANDROID_KABI_USE(number, _new) + * Use a previous padding entry that was defined with ANDROID_KABI_RESERVE + * number: the previous "number" of the padding variable + * _new: the variable to use now instead of the padding variable + */ +#define ANDROID_KABI_USE(number, _new) \ + _ANDROID_KABI_REPLACE(_ANDROID_KABI_RESERVE(number), _new) + +/* + * ANDROID_KABI_USE2(number, _new1, _new2) + * Use a previous padding entry that was defined with ANDROID_KABI_RESERVE for + * two new variables that fit into 64 bits. This is good for when you do not + * want to "burn" a 64bit padding variable for a smaller variable size if not + * needed. + */ +#define ANDROID_KABI_USE2(number, _new1, _new2) \ + _ANDROID_KABI_REPLACE(_ANDROID_KABI_RESERVE(number), struct{ _new1; _new2; }) + + +#endif /* _ANDROID_KABI_H */ diff --git a/include/linux/bio-crypt-ctx.h b/include/linux/bio-crypt-ctx.h index 8456a409fc215ec2df92cd2ffc3e3a03bcfcc373..9df113f74e9e3af884c6dfcf6d864982b543d3e4 100644 --- a/include/linux/bio-crypt-ctx.h +++ b/include/linux/bio-crypt-ctx.h @@ -43,7 +43,15 @@ struct blk_crypto_key { unsigned int data_unit_size; unsigned int data_unit_size_bits; unsigned int size; + + /* + * Hack to avoid breaking KMI: pack both hash and dun_bytes into the + * hash field... + */ +#define BLK_CRYPTO_KEY_HASH_MASK 0xffffff +#define BLK_CRYPTO_KEY_DUN_BYTES_SHIFT 24 unsigned int hash; + bool is_hw_wrapped; u8 raw[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE]; }; @@ -51,6 +59,26 @@ struct blk_crypto_key { #define BLK_CRYPTO_MAX_IV_SIZE 32 #define BLK_CRYPTO_DUN_ARRAY_SIZE (BLK_CRYPTO_MAX_IV_SIZE/sizeof(u64)) +static inline void +blk_crypto_key_set_hash_and_dun_bytes(struct blk_crypto_key *key, + u32 hash, unsigned int dun_bytes) +{ + key->hash = (dun_bytes << BLK_CRYPTO_KEY_DUN_BYTES_SHIFT) | + (hash & BLK_CRYPTO_KEY_HASH_MASK); +} + +static inline u32 +blk_crypto_key_hash(const struct blk_crypto_key *key) +{ + return key->hash & BLK_CRYPTO_KEY_HASH_MASK; +} + +static inline unsigned int +blk_crypto_key_dun_bytes(const struct blk_crypto_key *key) +{ + return key->hash >> BLK_CRYPTO_KEY_DUN_BYTES_SHIFT; +} + /** * struct bio_crypt_ctx - an inline encryption context * @bc_key: the key, algorithm, and data unit size to use diff --git a/include/linux/bio.h b/include/linux/bio.h index f31600f9850f3a65daf94ebb6e109936ce17640f..eaa847667561f1d1ed12dcf0cbda3a0e3b71d7b6 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -23,6 +23,7 @@ #include #include #include +#include #ifdef CONFIG_BLOCK @@ -357,6 +358,10 @@ struct bio_integrity_payload { struct work_struct bip_work; /* I/O completion */ struct bio_vec *bip_vec; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + struct bio_vec bip_inline_vecs[0];/* embedded bvec array */ }; @@ -762,6 +767,11 @@ struct bio_set { struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; struct biovec_slab { diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 7ac2e46112b7e5d2a208e3c1ac35b0f082b67588..e02cbca3cfaf09a4e2d920c0668c5183af2ed747 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -236,17 +236,17 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, #ifdef __KERNEL__ #ifndef set_mask_bits -#define set_mask_bits(ptr, _mask, 
_bits) \ +#define set_mask_bits(ptr, mask, bits) \ ({ \ - const typeof(*ptr) mask = (_mask), bits = (_bits); \ - typeof(*ptr) old, new; \ + const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \ + typeof(*(ptr)) old__, new__; \ \ do { \ - old = READ_ONCE(*ptr); \ - new = (old & ~mask) | bits; \ - } while (cmpxchg(ptr, old, new) != old); \ + old__ = READ_ONCE(*(ptr)); \ + new__ = (old__ & ~mask__) | bits__; \ + } while (cmpxchg(ptr, old__, new__) != old__); \ \ - new; \ + new__; \ }) #endif diff --git a/include/linux/bits.h b/include/linux/bits.h index 2b7b532c1d51d6352a524fd827f9fd776f690f78..3cfc1bb7b99accdd861f263743a5deebcbfc1f16 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h @@ -1,9 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BITS_H #define __LINUX_BITS_H + +#include +#include #include -#define BIT(nr) (1UL << (nr)) #define BIT_ULL(nr) (1ULL << (nr)) #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h index d383d3b320b999b931bec4f3a1c243a6712d29db..30a0b32900b49c38b31d0eaee5ca94fe4bcd8a00 100644 --- a/include/linux/blk-crypto.h +++ b/include/linux/blk-crypto.h @@ -18,9 +18,11 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key, unsigned int raw_key_size, bool is_hw_wrapped, enum blk_crypto_mode_num crypto_mode, + unsigned int dun_bytes, unsigned int data_unit_size); int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode, + unsigned int dun_bytes, unsigned int data_unit_size, bool is_hw_wrapped_key, struct request_queue *q); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 51165b1c1ad9d0d8cfa326a4d8b9167a4275ebb5..5de65bec9818e87873755fd14630aee413b19588 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -9,6 +9,7 @@ #include #include #include +#include struct bio_set; struct bio; @@ -211,6 +212,9 @@ struct bio { struct bio_set *bi_pool; + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + /* * We can inline a number of vecs at the end of the bio, to avoid * double allocations for a small number of bio_vecs. This member diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c43316d3f5be62a446ff82bc307c0ac3f552b338..26f0a62c303cef4cd3e1ea941517f7eeab2616af 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -629,7 +629,7 @@ struct request_queue { unsigned int sg_reserved_size; int node; #ifdef CONFIG_BLK_DEV_IO_TRACE - struct blk_trace *blk_trace; + struct blk_trace __rcu *blk_trace; struct mutex blk_trace_mutex; #endif /* diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 7bb2d8de9f308e367bcda4a5484f67483f8fc8e5..3b6ff5902edce65c9ec1dc1ccb0497b0ac521194 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f **/ #define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \ do { \ - struct blk_trace *bt = (q)->blk_trace; \ + struct blk_trace *bt; \ + \ + rcu_read_lock(); \ + bt = rcu_dereference((q)->blk_trace); \ if (unlikely(bt)) \ __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\ + rcu_read_unlock(); \ } while (0) #define blk_add_trace_msg(q, fmt, ...) 
\ blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__) @@ -61,10 +65,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f static inline bool blk_trace_note_message_enabled(struct request_queue *q) { - struct blk_trace *bt = q->blk_trace; - if (likely(!bt)) - return false; - return bt->act_mask & BLK_TC_NOTIFY; + struct blk_trace *bt; + bool ret; + + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + ret = bt && (bt->act_mask & BLK_TC_NOTIFY); + rcu_read_unlock(); + return ret; } extern void blk_add_driver_data(struct request_queue *q, struct request *rq, diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 96225a77c112e6d705a6dcbabc59e662750c6c4c..9168fc33a4f7843ff760d948cf7a892e4defffce 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -189,6 +189,8 @@ struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, void __brelse(struct buffer_head *); void __bforget(struct buffer_head *); void __breadahead(struct block_device *, sector_t block, unsigned int size); +void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size, + gfp_t gfp); struct buffer_head *__bread_gfp(struct block_device *, sector_t block, unsigned size, gfp_t gfp); void invalidate_bh_lrus(void); @@ -319,6 +321,12 @@ sb_breadahead(struct super_block *sb, sector_t block) __breadahead(sb->s_bdev, block, sb->s_blocksize); } +static inline void +sb_breadahead_unmovable(struct super_block *sb, sector_t block) +{ + __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0); +} + static inline struct buffer_head * sb_getblk(struct super_block *sb, sector_t block) { diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 308918928767ad5921b17b6fe16953dfdc0f6d93..2c3e23a91bc99200208f98e4879cba48522e1434 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -27,6 +27,8 @@ struct module; #include #endif +#include + /** * struct clocksource - hardware abstraction for a free running counter * Provides mostly state-free accessors to the underlying hardware. diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c9fd57ba4417f67266b1bd1998ffa0a0223d5699..0bbb5ca2735fd49387180a019c81284952c2ac12 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -345,7 +345,7 @@ static inline void *offset_to_ptr(const int *off) * compiler has support to do so. 
*/ #define compiletime_assert(condition, msg) \ - _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) + _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) #define compiletime_assert_atomic_type(t) \ compiletime_assert(__native_word(t), \ diff --git a/include/linux/const.h b/include/linux/const.h index 7b55a55f59112ccaec93486899f42b8619be8729..81b8aae5a8559c9fabf0665c48d8e8659db4f239 100644 --- a/include/linux/const.h +++ b/include/linux/const.h @@ -1,9 +1,6 @@ #ifndef _LINUX_CONST_H #define _LINUX_CONST_H -#include - -#define UL(x) (_UL(x)) -#define ULL(x) (_ULL(x)) +#include #endif /* _LINUX_CONST_H */ diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h index de0dafb9399dc0fb8022d364a3eedebca964138d..e2bf9373f987e53d87cb264e1fd7d62a2fc7019a 100644 --- a/include/linux/cpu_cooling.h +++ b/include/linux/cpu_cooling.h @@ -30,6 +30,12 @@ struct cpufreq_policy; +typedef int (*plat_mitig_t)(int cpu, u32 clip_freq); + +struct cpu_cooling_ops { + plat_mitig_t ceil_limit, floor_limit; +}; + #ifdef CONFIG_CPU_THERMAL /** * cpufreq_cooling_register - function to create cpufreq cooling device. @@ -38,6 +44,10 @@ struct cpufreq_policy; struct thermal_cooling_device * cpufreq_cooling_register(struct cpufreq_policy *policy); +struct thermal_cooling_device * +cpufreq_platform_cooling_register(struct cpufreq_policy *policy, + struct cpu_cooling_ops *ops); + /** * cpufreq_cooling_unregister - function to remove cpufreq cooling device. * @cdev: thermal cooling device pointer. @@ -51,6 +61,13 @@ cpufreq_cooling_register(struct cpufreq_policy *policy) return ERR_PTR(-ENOSYS); } +static inline struct thermal_cooling_device * +cpufreq_platform_cooling_register(struct cpufreq_policy *policy, + struct cpu_cooling_ops *ops) +{ + return NULL; +} + static inline void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) { diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index d67c0035165c2a90273d8fbc90aa7529ec729846..2a7359382954c96b2ebbf96e72a03c3b4674463f 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -69,11 +69,14 @@ enum cpuhp_state { CPUHP_SLAB_PREPARE, CPUHP_MD_RAID5_PREPARE, CPUHP_RCUTREE_PREP, + CPUHP_HYP_CORE_CTL_ISOLATION_DEAD, + CPUHP_CORE_CTL_ISOLATION_DEAD, CPUHP_CPUIDLE_COUPLED_PREPARE, CPUHP_POWERPC_PMAC_PREPARE, CPUHP_POWERPC_MMU_CTX_PREPARE, CPUHP_XEN_PREPARE, CPUHP_XEN_EVTCHN_PREPARE, + CPUHP_QCOM_CPUFREQ_PREPARE, CPUHP_ARM_SHMOBILE_SCU_PREPARE, CPUHP_SH_SH3X_PREPARE, CPUHP_NET_FLOW_PREPARE, @@ -96,6 +99,7 @@ enum cpuhp_state { CPUHP_AP_SCHED_STARTING, CPUHP_AP_RCUTREE_DYING, CPUHP_AP_IRQ_GIC_STARTING, + CPUHP_AP_EDAC_PMU_STARTING, CPUHP_AP_IRQ_HIP04_STARTING, CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, @@ -122,6 +126,8 @@ enum cpuhp_state { CPUHP_AP_JCORE_TIMER_STARTING, CPUHP_AP_ARM_TWD_STARTING, CPUHP_AP_QCOM_TIMER_STARTING, + CPUHP_AP_QCOM_CPUFREQ_STARTING, + CPUHP_AP_QCOM_SLEEP_STARTING, CPUHP_AP_ARMADA_TIMER_STARTING, CPUHP_AP_MARCO_TIMER_STARTING, CPUHP_AP_MIPS_GIC_TIMER_STARTING, @@ -134,6 +140,8 @@ enum cpuhp_state { /* Must be the last timer callback */ CPUHP_AP_DUMMY_TIMER_STARTING, CPUHP_AP_ARM_XEN_STARTING, + CPUHP_AP_ARM_SAVE_RESTORE_CORESIGHT4_STARTING, + CPUHP_AP_ARM_MM_CORESIGHT4_STARTING, CPUHP_AP_ARM_CORESIGHT_STARTING, CPUHP_AP_ARM64_ISNDEP_STARTING, CPUHP_AP_SMPCFD_DYING, @@ -170,6 +178,7 @@ enum cpuhp_state { CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RCUTREE_ONLINE, + CPUHP_AP_NOTIFY_PERF_ONLINE, 
CPUHP_AP_BASE_CACHEINFO_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 486fc3c66215883fb2164c07a09b8ef4def509a3..45b20e0d6f77b3c4e449593fc479863235d51f2e 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -13,6 +13,7 @@ #include #include #include +#include struct path; struct vfsmount; @@ -117,6 +118,9 @@ struct dentry { struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */ struct rcu_head d_rcu; } d_u; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); } __randomize_layout; /* @@ -147,6 +151,10 @@ struct dentry_operations { int (*d_manage)(const struct path *, bool); struct dentry *(*d_real)(struct dentry *, const struct inode *); void (*d_canonical_path)(const struct path *, struct path *); + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } ____cacheline_aligned; /* diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 8601e40e9517b27ff9664a0011e03325a5aab1d3..2151a0649b0f0023b04ee96cacfa418f58ffe1c4 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -238,6 +238,9 @@ extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, * the governor may consider slowing the frequency down. * Specify 0 to use the default. Valid value = 0 to 100. * downdifferential < upthreshold must hold. + * @simple_scaling: Setting this flag will scale the clocks up only if the + * load is above @upthreshold and will scale the clocks + * down only if the load is below @downdifferential. * * If the fed devfreq_simple_ondemand_data pointer is NULL to the governor, * the governor uses the default values. @@ -245,6 +248,7 @@ extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, struct devfreq_simple_ondemand_data { unsigned int upthreshold; unsigned int downdifferential; + unsigned int simple_scaling; }; #endif diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h index 4635f95000a4c50bff074ac5bb2f174985f7477a..79a6e37a1d6f641bcd56e15af8d2b009cb321f88 100644 --- a/include/linux/devfreq_cooling.h +++ b/include/linux/devfreq_cooling.h @@ -75,7 +75,7 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *dfc); #else /* !CONFIG_DEVFREQ_THERMAL */ -struct thermal_cooling_device * +static inline struct thermal_cooling_device * of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df, struct devfreq_cooling_power *dfc_power) { diff --git a/include/linux/device.h b/include/linux/device.h index 0c4baa89ef193d6789a779e01e45c0290b98d6ee..7726a1b8fcebba6b918eb4ad9e02714969a081dc 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -26,6 +26,7 @@ #include #include #include +#include #include struct device; @@ -151,6 +152,11 @@ struct bus_type { struct lock_class_key lock_key; bool need_parent_lock; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; extern int __must_check bus_register(struct bus_type *bus); @@ -314,6 +320,11 @@ struct device_driver { void (*coredump) (struct device *dev); struct driver_private *p; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; @@ -440,6 +451,11 @@ struct class { const struct dev_pm_ops *pm; struct subsys_private *p; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; struct 
class_dev_iter { @@ -1083,6 +1099,15 @@ struct device { bool offline:1; bool of_node_reused:1; bool state_synced:1; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); + ANDROID_KABI_RESERVE(5); + ANDROID_KABI_RESERVE(6); + ANDROID_KABI_RESERVE(7); + ANDROID_KABI_RESERVE(8); }; static inline struct device *kobj_to_dev(struct kobject *kobj) diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 5087b24c25c619f853cb60b58c07b4ead576fa66..c8d953c92fe0ef9afc8c20091193b34f1043f4d8 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -355,6 +355,21 @@ struct dma_buf_ops { void *(*vmap)(struct dma_buf *); void (*vunmap)(struct dma_buf *, void *vaddr); + /** + * @get_uuid + * + * This is called by dma_buf_get_uuid to get the UUID which identifies + * the buffer to virtio devices. + * + * This callback is optional. + * + * Returns: + * + * 0 on success or a negative error code on failure. On success uuid + * will be populated with the buffer's UUID. + */ + int (*get_uuid)(struct dma_buf *dmabuf, uuid_t *uuid); + /** * @get_flags: * @@ -549,6 +564,7 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, void *dma_buf_vmap(struct dma_buf *); void dma_buf_vunmap(struct dma_buf *, void *vaddr); int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags); +int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid); /** * dma_buf_set_destructor - set the dma-buf's destructor @@ -563,4 +579,5 @@ static inline void dma_buf_set_destructor(struct dma_buf *dmabuf, dmabuf->dtor = dtor; dmabuf->dtor_data = dtor_data; } + #endif /* __DMA_BUF_H__ */ diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index e8ca5e6542773fea4c54725f360743e3f1407b7c..fd7652e0aed90587b1cfbbad38ebc11d08224e3e 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -25,6 +25,8 @@ #include #include +struct iova_domain; + int iommu_dma_init(void); /* Domain management interface for IOMMU drivers */ @@ -56,6 +58,11 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, int prot); int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int prot); +size_t iommu_dma_prepare_map_sg(struct device *dev, struct iova_domain *iovad, + struct scatterlist *sg, int nents); +int iommu_dma_finalise_sg(struct device *dev, struct scatterlist *sg, + int nents, dma_addr_t dma_addr); +void iommu_dma_invalidate_sg(struct scatterlist *sg, int nents); /* * Arch code with no special attribute handling may use these @@ -75,6 +82,11 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg); void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); +int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base, + u64 size); + +int iommu_dma_enable_best_fit_algo(struct device *dev); + #else struct iommu_domain; @@ -108,6 +120,17 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he { } +static inline int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base, + u64 size) +{ + return -ENODEV; +} + +static inline int iommu_dma_enable_best_fit_algo(struct device *dev) +{ + return -ENODEV; +} + #endif /* CONFIG_IOMMU_DMA */ #endif /* __KERNEL__ */ #endif /* __DMA_IOMMU_H */ diff --git a/include/linux/dma-mapping-fast.h b/include/linux/dma-mapping-fast.h new file mode 100644 index 
0000000000000000000000000000000000000000..aca7592bfb6e7709fa8d0f324a049903bc9bfda1 --- /dev/null +++ b/include/linux/dma-mapping-fast.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef __LINUX_DMA_MAPPING_FAST_H +#define __LINUX_DMA_MAPPING_FAST_H + +#include +#include + +struct dma_iommu_mapping; +struct io_pgtable_ops; +struct iova_domain; + +struct dma_fast_smmu_mapping { + struct device *dev; + struct iommu_domain *domain; + struct iova_domain *iovad; + + dma_addr_t base; + size_t size; + size_t num_4k_pages; + + unsigned int bitmap_size; + unsigned long *bitmap; + unsigned long next_start; + unsigned long upcoming_stale_bit; + bool have_stale_tlbs; + + dma_addr_t pgtbl_dma_handle; + struct io_pgtable_ops *pgtbl_ops; + + spinlock_t lock; + struct notifier_block notifier; +}; + +#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST +int fast_smmu_init_mapping(struct device *dev, + struct dma_iommu_mapping *mapping); +void fast_smmu_put_dma_cookie(struct iommu_domain *domain); +#else +static inline int fast_smmu_init_mapping(struct device *dev, + struct dma_iommu_mapping *mapping) +{ + return -ENODEV; +} + +static inline void fast_smmu_put_dma_cookie(struct iommu_domain *domain) {} +#endif + +#endif /* __LINUX_DMA_MAPPING_FAST_H */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index d042a15e9aa7a2db1422794602440bcd74a3079c..211bdf806771eb58e38fab6ea8b91e4f94ba43c0 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -11,6 +11,7 @@ #include #include #include +#include /** * List of possible attributes associated with a DMA mapping. The semantics @@ -70,10 +71,57 @@ */ #define DMA_ATTR_PRIVILEGED (1UL << 9) +/* + * DMA_ATTR_STRONGLY_ORDERED: Specifies that accesses to the mapping must + * not be buffered, reordered, merged with other accesses, or unaligned. + * No speculative access may occur in this mapping. + */ +#define DMA_ATTR_STRONGLY_ORDERED (1UL << 10) /* * DMA_ATTR_SKIP_ZEROING: Do not zero mapping. */ #define DMA_ATTR_SKIP_ZEROING (1UL << 11) +/* + * DMA_ATTR_NO_DELAYED_UNMAP: Used by msm specific lazy mapping to indicate + * that the mapping can be freed on unmap, rather than when the ion_buffer + * is freed. + */ +#define DMA_ATTR_NO_DELAYED_UNMAP (1UL << 12) +/* + * DMA_ATTR_EXEC_MAPPING: The mapping has executable permissions. + */ +#define DMA_ATTR_EXEC_MAPPING (1UL << 13) +/* + * DMA_ATTR_IOMMU_USE_UPSTREAM_HINT: Normally an smmu will override any bus + * attributes (i.e cacheablilty) provided by the client device. Some hardware + * may be designed to use the original attributes instead. + */ +#define DMA_ATTR_IOMMU_USE_UPSTREAM_HINT (1UL << 14) +/* + * When passed to a DMA map call the DMA_ATTR_FORCE_COHERENT DMA + * attribute can be used to force a buffer to be mapped as IO coherent. + */ +#define DMA_ATTR_FORCE_COHERENT (1UL << 15) +/* + * When passed to a DMA map call the DMA_ATTR_FORCE_NON_COHERENT DMA + * attribute can be used to force a buffer to not be mapped as IO + * coherent. + */ +#define DMA_ATTR_FORCE_NON_COHERENT (1UL << 16) +/* + * DMA_ATTR_DELAYED_UNMAP: Used by ION, it will ensure that mappings are not + * removed on unmap but instead are removed when the ion_buffer is freed. 
+ */ +#define DMA_ATTR_DELAYED_UNMAP (1UL << 17) + +/* + * DMA_ATTR_IOMMU_USE_LLC_NWA: Overrides the bus attributes to use the System + * Cache(LLC) with allocation policy as Inner Non-Cacheable, Outer Cacheable: + * Write-Back, Read-Allocate, No Write-Allocate policy. + */ +#define DMA_ATTR_IOMMU_USE_LLC_NWA (1UL << 18) + +#define DMA_ERROR_CODE (~(dma_addr_t)0) /* * A dma_addr_t can hold any valid DMA or bus address for the platform. @@ -135,6 +183,7 @@ struct dma_map_ops { enum dma_data_direction direction); int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); int (*dma_supported)(struct device *dev, u64 mask); + int (*set_dma_mask)(struct device *dev, u64 mask); void *(*remap)(struct device *dev, void *cpu_addr, dma_addr_t handle, size_t size, unsigned long attrs); void (*unremap)(struct device *dev, void *remapped_address, @@ -142,6 +191,11 @@ struct dma_map_ops { #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK u64 (*get_required_mask)(struct device *dev); #endif + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; extern const struct dma_map_ops dma_direct_ops; @@ -463,7 +517,8 @@ void *dma_common_contiguous_remap(struct page *page, size_t size, void *dma_common_pages_remap(struct page **pages, size_t size, unsigned long vm_flags, pgprot_t prot, const void *caller); -void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); +void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags, + bool nowarn); /** * dma_mmap_attrs - map a coherent DMA allocation into user space @@ -608,6 +663,11 @@ static inline int dma_supported(struct device *dev, u64 mask) #ifndef HAVE_ARCH_DMA_SET_MASK static inline int dma_set_mask(struct device *dev, u64 mask) { + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (ops->set_dma_mask) + return ops->set_dma_mask(dev, mask); + if (!dev->dma_mask || !dma_supported(dev, mask)) return -EIO; @@ -769,6 +829,10 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, void dma_release_declared_memory(struct device *dev); void *dma_mark_declared_memory_occupied(struct device *dev, dma_addr_t device_addr, size_t size); +dma_addr_t dma_get_device_base(struct device *dev, + struct dma_coherent_mem *mem); +unsigned long dma_get_size(struct dma_coherent_mem *mem); + #else static inline int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, @@ -788,6 +852,17 @@ dma_mark_declared_memory_occupied(struct device *dev, { return ERR_PTR(-EBUSY); } +static inline dma_addr_t +dma_get_device_base(struct device *dev, struct dma_coherent_mem *mem) +{ + return 0; +} + +static inline unsigned long dma_get_size(struct dma_coherent_mem *mem) +{ + return 0; +} + #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ #ifdef CONFIG_HAS_DMA diff --git a/include/linux/elevator.h b/include/linux/elevator.h index d10cc2971658391adacc39b7007574775d95c657..aa8e39244d78ad6aee424cb9d0dad9715351acf8 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -4,6 +4,7 @@ #include #include +#include #ifdef CONFIG_BLOCK @@ -120,6 +121,11 @@ struct elevator_mq_ops { void (*init_icq)(struct io_cq *); void (*exit_icq)(struct io_cq *); void (*elevator_registered_fn)(struct request_queue *q); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; #define ELV_NAME_MAX (16) @@ -158,6 +164,9 @@ struct elevator_type /* managed by elevator core */ char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + 
"_io_cq" */ struct list_head list; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; #define ELV_HASH_BITS 6 diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h index f236f5b931b2a0ef0021556bf1b41be09668062a..594d4e78654fcfe1c07b47619dfb1c9986777bb8 100644 --- a/include/linux/elfnote.h +++ b/include/linux/elfnote.h @@ -59,7 +59,7 @@ ELFNOTE_END #else /* !__ASSEMBLER__ */ -#include +#include /* * Use an anonymous structure which matches the shape of * Elf{32,64}_Nhdr, but includes the name and desc data. The size and diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index f8a2245b70ac354231c7911e9684f756ed47de0f..3aaebb60a05336c49ab44abae6fd2aa900a94a7f 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -15,6 +15,7 @@ #include #include +#include #include #ifdef CONFIG_COMPAT @@ -412,5 +413,10 @@ struct ethtool_ops { struct ethtool_fecparam *); void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/export.h b/include/linux/export.h index ce764a5d2ee45023c99da81485f77bd958480de7..e7ccb184467cbf165327f4ad87760ff579a54962 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -103,7 +103,7 @@ struct kernel_symbol { */ #define __EXPORT_SYMBOL(sym, sec) === __KSYM_##sym === -#elif defined(CONFIG_TRIM_UNUSED_KSYMS) +#elif defined(CONFIG_TRIM_UNUSED_KSYMS) && !defined(MODULE) #include diff --git a/include/linux/extcon.h b/include/linux/extcon.h index d91dd704a729a9af69f0c4724fc1b068a292e1ed..1445b7bd36f4c1fda790f4c787cb5c8a677889f1 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -125,14 +125,19 @@ * @type: integer (intval) * @value: 0 (USB/USB2) or 1 (USB3) * @default: 0 (USB/USB2) + * - EXTCON_PROP_USB_TYPEC_MED_HIGH_CURRENT + * @type: integer (intval) + * @value: 0 (default current), 1 (medium or high current) + * @default: 0 (default current) * */ #define EXTCON_PROP_USB_VBUS 0 #define EXTCON_PROP_USB_TYPEC_POLARITY 1 #define EXTCON_PROP_USB_SS 2 +#define EXTCON_PROP_USB_TYPEC_MED_HIGH_CURRENT 3 #define EXTCON_PROP_USB_MIN 0 -#define EXTCON_PROP_USB_MAX 2 +#define EXTCON_PROP_USB_MAX 3 #define EXTCON_PROP_USB_CNT (EXTCON_PROP_USB_MAX - EXTCON_PROP_USB_MIN + 1) /* Properties of EXTCON_TYPE_CHG. */ @@ -210,6 +215,8 @@ extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); extern int extcon_register_blocking_notifier(struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); +extern int extcon_unregister_blocking_notifier(struct extcon_dev *edev, + unsigned int id, struct notifier_block *nb); extern int devm_extcon_register_notifier(struct device *dev, struct extcon_dev *edev, unsigned int id, struct notifier_block *nb); @@ -239,6 +246,8 @@ extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, /* Following API get the name of extcon device. 
*/ extern const char *extcon_get_edev_name(struct extcon_dev *edev); +extern int extcon_blocking_sync(struct extcon_dev *edev, unsigned int id, + bool val); #else /* CONFIG_EXTCON */ static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id) { @@ -277,6 +286,13 @@ static inline int extcon_register_blocking_notifier(struct extcon_dev *edev, return 0; } +static inline int extcon_unregister_blocking_notifier(struct extcon_dev *edev, + unsigned int id, + struct notifier_block *nb) +{ + return 0; +} + static inline int devm_extcon_register_notifier(struct device *dev, struct extcon_dev *edev, unsigned int id, struct notifier_block *nb) diff --git a/include/linux/fs.h b/include/linux/fs.h index 03db303ce975601f8f08dd63d7072ae316d46fbd..3fde08471f7bf24a5f2a0a190bb4df069bde4e8e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -398,6 +399,11 @@ struct address_space_operations { int (*swap_activate)(struct swap_info_struct *sis, struct file *file, sector_t *span); void (*swap_deactivate)(struct file *file); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; extern const struct address_space_operations empty_aops; @@ -432,6 +438,11 @@ struct address_space { struct list_head private_list; /* for use by the address_space */ void *private_data; /* ditto */ errseq_t wb_err; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } __attribute__((aligned(sizeof(long)))) __randomize_layout; /* * On most architectures that alignment is already the case; but @@ -476,6 +487,11 @@ struct block_device { int bd_fsfreeze_count; /* Mutex for freeze */ struct mutex bd_fsfreeze_mutex; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } __randomize_layout; /* @@ -700,6 +716,9 @@ struct inode { #endif void *i_private; /* fs or device private pointer */ + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); } __randomize_layout; static inline unsigned int i_blocksize(const struct inode *node) @@ -991,6 +1010,9 @@ struct file_lock; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; struct lock_manager_operations { @@ -1003,6 +1025,9 @@ struct lock_manager_operations { bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; struct lock_manager { @@ -1070,6 +1095,10 @@ struct file_lock { int state; /* state of grant or error if -ve */ } afs; } fl_u; + + struct list_head android_reserved1; /* not a macro as we might just need it as-is */ + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); } __randomize_layout; struct file_lock_context { @@ -1497,6 +1526,11 @@ struct super_block { spinlock_t s_inode_wblist_lock; struct list_head s_inodes_wb; /* writeback inodes */ + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } __randomize_layout; /* Helper functions so that in most cases filesystems will @@ -1808,6 +1842,11 @@ struct file_operations { int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64); int (*fadvise)(struct file *, loff_t, loff_t, int); + + ANDROID_KABI_RESERVE(1); + 
ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } __randomize_layout; struct inode_operations { @@ -1840,6 +1879,11 @@ struct inode_operations { umode_t create_mode); int (*tmpfile) (struct inode *, struct dentry *, umode_t); int (*set_acl)(struct inode *, struct posix_acl *, int); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } ____cacheline_aligned; static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, @@ -1925,6 +1969,11 @@ struct super_operations { struct shrink_control *); long (*free_cached_objects)(struct super_block *, struct shrink_control *); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /* @@ -2191,6 +2240,11 @@ struct file_system_type { struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) diff --git a/include/linux/genhd.h b/include/linux/genhd.h index f13272d8433200b5c846365b808a29180d21b12f..a77a364fff29b0c7107784828b8a00b8144ddc81 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -17,6 +17,7 @@ #include #include #include +#include #ifdef CONFIG_BLOCK @@ -130,6 +131,11 @@ struct hd_struct { #endif struct percpu_ref ref; struct rcu_work rcu_work; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; #define GENHD_FL_REMOVABLE 1 @@ -167,6 +173,9 @@ struct blk_integrity { unsigned char tuple_size; unsigned char interval_exp; unsigned char tag_size; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; #endif /* CONFIG_BLK_DEV_INTEGRITY */ @@ -211,6 +220,12 @@ struct gendisk { int node_id; struct badblocks *bb; struct lockdep_map lockdep_map; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); + }; static inline struct gendisk *part_to_disk(struct hd_struct *part) diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 542b4fa2cda9b0051d9a6fe11a7d642e2d2bbc59..1e9e5b1775191690a820053992261860d0375f6b 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -15,13 +15,14 @@ #ifndef _LINUX_HRTIMER_H #define _LINUX_HRTIMER_H +#include #include -#include #include #include #include #include #include +#include struct hrtimer_clock_base; struct hrtimer_cpu_base; @@ -115,6 +116,8 @@ struct hrtimer { u8 state; u8 is_rel; u8 is_soft; + + ANDROID_KABI_RESERVE(1); }; /** @@ -301,26 +304,12 @@ struct clock_event_device; extern void hrtimer_interrupt(struct clock_event_device *dev); -/* - * The resolution of the clocks. The resolution value is returned in - * the clock_getres() system call to give application programmers an - * idea of the (in)accuracy of timers. Timer values are rounded up to - * this resolution values. 
- */ -# define HIGH_RES_NSEC 1 -# define KTIME_HIGH_RES (HIGH_RES_NSEC) -# define MONOTONIC_RES_NSEC HIGH_RES_NSEC -# define KTIME_MONOTONIC_RES KTIME_HIGH_RES - extern void clock_was_set_delayed(void); extern unsigned int hrtimer_resolution; #else -# define MONOTONIC_RES_NSEC LOW_RES_NSEC -# define KTIME_MONOTONIC_RES KTIME_LOW_RES - #define hrtimer_resolution (unsigned int)LOW_RES_NSEC static inline void clock_was_set_delayed(void) { } diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..2d3e3c5fb94662cd497171e758b05364475d2ca8 --- /dev/null +++ b/include/linux/hrtimer_defs.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_HRTIMER_DEFS_H +#define _LINUX_HRTIMER_DEFS_H + +#include + +#ifdef CONFIG_HIGH_RES_TIMERS + +/* + * The resolution of the clocks. The resolution value is returned in + * the clock_getres() system call to give application programmers an + * idea of the (in)accuracy of timers. Timer values are rounded up to + * this resolution values. + */ +# define HIGH_RES_NSEC 1 +# define KTIME_HIGH_RES (HIGH_RES_NSEC) +# define MONOTONIC_RES_NSEC HIGH_RES_NSEC +# define KTIME_MONOTONIC_RES KTIME_HIGH_RES + +#else + +# define MONOTONIC_RES_NSEC LOW_RES_NSEC +# define KTIME_MONOTONIC_RES KTIME_LOW_RES + +#endif + +#endif diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index c83478271c2e03788f5d5e731923966f29131a5f..778d3ef939d89f6ffc18dae35be5cc758cbc5dd2 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -622,6 +622,15 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc) cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); } +/** + * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_any_nullfunc(__le16 fc) +{ + return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)); +} + /** * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU * @fc: frame control field in little-endian byteorder diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index a74cb177dc6f9cf9bcab0ed6547760b1e01b6e70..136ce51548a824008d9fd2e09202d329b6796a77 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -599,7 +599,7 @@ void iio_device_unregister(struct iio_dev *indio_dev); * 0 on success, negative error number on failure. */ #define devm_iio_device_register(dev, indio_dev) \ - __devm_iio_device_register((dev), (indio_dev), THIS_MODULE); + __devm_iio_device_register((dev), (indio_dev), THIS_MODULE) int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, struct module *this_mod); void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev); diff --git a/include/linux/io-pgtable-fast.h b/include/linux/io-pgtable-fast.h new file mode 100644 index 0000000000000000000000000000000000000000..3eeef2655280f63a46059e46ef6803427fb4397f --- /dev/null +++ b/include/linux/io-pgtable-fast.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + */ + +#ifndef __LINUX_IO_PGTABLE_FAST_H +#define __LINUX_IO_PGTABLE_FAST_H + +#include + +/* + * This ought to be private to io-pgtable-fast, but dma-mapping-fast + * currently requires it for a debug usecase. 
+ */ +typedef u64 av8l_fast_iopte; + +struct io_pgtable_ops; + +#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST + +int av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); + +void av8l_fast_unmap_public(struct io_pgtable_ops *ops, unsigned long iova, + size_t size); + +int av8l_fast_map_sg_public(struct io_pgtable_ops *ops, + unsigned long iova, struct scatterlist *sgl, + unsigned int nents, int prot, size_t *size); + +bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops, + unsigned long iova); + +phys_addr_t av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops, + unsigned long iova); +#else +static inline int +av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + return -EINVAL; +} +static inline void av8l_fast_unmap_public(struct io_pgtable_ops *ops, + unsigned long iova, size_t size) +{ +} + +static inline int av8l_fast_map_sg_public(struct io_pgtable_ops *ops, + unsigned long iova, struct scatterlist *sgl, + unsigned int nents, int prot, size_t *size) +{ + return 0; +} + +static inline bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops, + unsigned long iova) +{ + return false; +} +static inline phys_addr_t +av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops, + unsigned long iova) +{ + return 0; +} +#endif /* CONFIG_IOMMU_IO_PGTABLE_FAST */ + + +/* events for notifiers passed to av8l_register_notify */ +#define MAPPED_OVER_STALE_TLB 1 + + +#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB +/* + * Doesn't matter what we use as long as bit 0 is unset. The reason why we + * need a different value at all is that there are certain hardware + * platforms with erratum that require that a PTE actually be zero'd out + * and not just have its valid bit unset. + */ +#define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0xa + +void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, u64 base, + u64 start, u64 end, bool skip_sync); +void av8l_register_notify(struct notifier_block *nb); + +#else /* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */ + +#define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0 + +static inline void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, + u64 base, + u64 start, + u64 end, + bool skip_sync) +{ +} + +static inline void av8l_register_notify(struct notifier_block *nb) +{ +} + +#endif /* CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */ + +#endif /* __LINUX_IO_PGTABLE_FAST_H */ diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 2df79093cad919a9e51c6a0a918d482db27c92fb..24fd587f2012c22c9f2ab2bd84fa2642bda462a0 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -3,6 +3,8 @@ #define __IO_PGTABLE_H #include +#include + /* * Public API for use by IOMMU drivers */ @@ -12,6 +14,7 @@ enum io_pgtable_fmt { ARM_64_LPAE_S1, ARM_64_LPAE_S2, ARM_V7S, + ARM_V8L_FAST, IO_PGTABLE_NUM_FMTS, }; @@ -23,6 +26,10 @@ enum io_pgtable_fmt { * @tlb_sync: Ensure any queued TLB invalidation has taken effect, and * any corresponding page table updates are visible to the * IOMMU. + * @alloc_pages_exact: Allocate page table memory (optional, defaults to + * alloc_pages_exact) + * @free_pages_exact: Free page table memory (optional, defaults to + * free_pages_exact) * * Note that these can all be called in atomic context and must therefore * not block. 
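
As the comment above notes, the new @alloc_pages_exact/@free_pages_exact hooks let an IOMMU driver supply its own page-table memory and, like the TLB callbacks, they may be invoked in atomic context. A hedged sketch of a driver-side iommu_gather_ops wired up to the new hooks; every my_* name is hypothetical and the stub TLB callbacks stand in for real maintenance code.

#include <linux/gfp.h>
#include <linux/io-pgtable.h>

static void my_tlb_flush_all(void *cookie) { /* driver-specific TLB maintenance */ }
static void my_tlb_add_flush(unsigned long iova, size_t size, size_t granule,
			     bool leaf, void *cookie) { }
static void my_tlb_sync(void *cookie) { }

/* Callers may hold spinlocks: honour gfp_mask, never sleep unconditionally. */
static void *my_ptbl_alloc(void *cookie, size_t size, gfp_t gfp_mask)
{
	return alloc_pages_exact(size, gfp_mask | __GFP_ZERO);
}

static void my_ptbl_free(void *cookie, void *virt, size_t size)
{
	free_pages_exact(virt, size);
}

static const struct iommu_gather_ops my_gather_ops = {
	.tlb_flush_all		= my_tlb_flush_all,
	.tlb_add_flush		= my_tlb_add_flush,
	.tlb_sync		= my_tlb_sync,
	.alloc_pages_exact	= my_ptbl_alloc,
	.free_pages_exact	= my_ptbl_free,
};
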
@@ -32,6 +39,8 @@ struct iommu_gather_ops { void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule, bool leaf, void *cookie); void (*tlb_sync)(void *cookie); + void *(*alloc_pages_exact)(void *cookie, size_t size, gfp_t gfp_mask); + void (*free_pages_exact)(void *cookie, void *virt, size_t size); }; /** @@ -67,22 +76,44 @@ struct io_pgtable_cfg { * when the SoC is in "4GB mode" and they can only access the high * remap of DRAM (0x1_00000000 to 0x1_ffffffff). * + * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever * be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a * software-emulated IOMMU), such that pagetable updates need not * be treated as explicit DMA data. + * + + * IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE: + * Having page tables which are non coherent, but cached in a + * system cache requires SH=Non-Shareable. This applies to the + * qsmmuv500 model. For data buffers SH=Non-Shareable is not + * required. + + * IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT: Override the attributes + * set in TCR for the page table walker. Use attributes specified + * by the upstream hw instead. + * + * IO_PGTABLE_QUIRK_QCOM_USE_LLC_NWA: Override the attributes + * set in TCR for the page table walker with Write-Back, + * no Write-Allocate cacheable encoding. + * */ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2) #define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3) #define IO_PGTABLE_QUIRK_NO_DMA BIT(4) + #define IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE BIT(5) + #define IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT BIT(6) + #define IO_PGTABLE_QUIRK_QCOM_USE_LLC_NWA BIT(7) unsigned long quirks; unsigned long pgsize_bitmap; unsigned int ias; unsigned int oas; const struct iommu_gather_ops *tlb; struct device *iommu_dev; + dma_addr_t iova_base; + dma_addr_t iova_end; /* Low-level data specific to the table format */ union { @@ -103,15 +134,28 @@ struct io_pgtable_cfg { u32 nmrr; u32 prrr; } arm_v7s_cfg; + + struct { + u64 ttbr[2]; + u64 tcr; + u64 mair[2]; + void *pmds; + } av8l_fast_cfg; }; }; /** * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers. * - * @map: Map a physically contiguous memory region. - * @unmap: Unmap a physically contiguous memory region. - * @iova_to_phys: Translate iova to physical address. + * @map: Map a physically contiguous memory region. + * @map_sg: Map a scatterlist. Returns the number of bytes mapped, + * or -ve val on failure. The size parameter contains the + * size of the partial mapping in case of failure. + * @unmap: Unmap a physically contiguous memory region. + * @iova_to_phys: Translate iova to physical address. + * @is_iova_coherent: Checks coherency of given IOVA. Returns True if coherent + * and False if non-coherent. + * @iova_to_pte: Translate iova to Page Table Entry (PTE). * * These functions map directly onto the iommu_ops member functions with * the same names. 
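
Because ->map_sg is optional, a caller such as a DMA-mapping backend is expected to fall back to per-page ->map when a page-table format does not provide it. A hedged sketch of that pattern follows; my_map_sgtable is hypothetical, cleanup of partially mapped ranges is omitted, and segment lengths are assumed to be multiples of a supported page size.

#include <linux/io-pgtable.h>
#include <linux/scatterlist.h>

static int my_map_sgtable(struct io_pgtable_ops *ops, unsigned long iova,
			  struct scatterlist *sgl, unsigned int nents, int prot)
{
	struct scatterlist *sg;
	size_t mapped = 0;
	unsigned int i;

	if (ops->map_sg) {
		/* Returns bytes mapped or a negative error; partial size in 'mapped'. */
		int ret = ops->map_sg(ops, iova, sgl, nents, prot, &mapped);

		return ret < 0 ? ret : 0;
	}

	for_each_sg(sgl, sg, nents, i) {
		int ret = ops->map(ops, iova + mapped, sg_phys(sg),
				   sg->length, prot);

		if (ret)
			return ret;
		mapped += sg->length;
	}

	return 0;
}

Letting the format implement map_sg allows it to batch PTE writes and TLB maintenance instead of paying the per-page call overhead.
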
@@ -121,8 +165,16 @@ struct io_pgtable_ops { phys_addr_t paddr, size_t size, int prot); size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, size_t size); + int (*map_sg)(struct io_pgtable_ops *ops, unsigned long iova, + struct scatterlist *sg, unsigned int nents, + int prot, size_t *size); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); + bool (*is_iova_coherent)(struct io_pgtable_ops *ops, + unsigned long iova); + uint64_t (*iova_to_pte)(struct io_pgtable_ops *ops, + unsigned long iova); + }; /** @@ -173,17 +225,23 @@ struct io_pgtable { static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) { + if (!iop->cfg.tlb) + return; iop->cfg.tlb->tlb_flush_all(iop->cookie); } static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop, unsigned long iova, size_t size, size_t granule, bool leaf) { + if (!iop->cfg.tlb) + return; iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie); } static inline void io_pgtable_tlb_sync(struct io_pgtable *iop) { + if (!iop->cfg.tlb) + return; iop->cfg.tlb->tlb_sync(iop->cookie); } @@ -204,5 +262,31 @@ extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns; extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns; extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns; extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns; +extern struct io_pgtable_init_fns io_pgtable_av8l_fast_init_fns; +extern struct io_pgtable_init_fns io_pgtable_arm_msm_secure_init_fns; + +/** + * io_pgtable_alloc_pages_exact: + * allocate an exact number of physically-contiguous pages. + * @size: the number of bytes to allocate + * @gfp_mask: GFP flags for the allocation + * + * Like alloc_pages_exact(), but with some additional accounting for debug + * purposes. + */ +void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie, + size_t size, gfp_t gfp_mask); + +/** + * io_pgtable_free_pages_exact: + * release memory allocated via io_pgtable_alloc_pages_exact() + * @virt: the value returned by alloc_pages_exact. + * @size: size of allocation, same value as passed to alloc_pages_exact(). + * + * Like free_pages_exact(), but with some additional accounting for debug + * purposes. 
+ */ +void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie, + void *virt, size_t size); #endif /* __IO_PGTABLE_H */ diff --git a/include/linux/io.h b/include/linux/io.h index 32e30e8fb9db92cf1472c6188ff3310a610b3c5f..da39ff89df65164548cbe2f4a1ccab91301b167e 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -75,6 +75,8 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr) void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, resource_size_t size); +void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset, + resource_size_t size); void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, resource_size_t size); void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset, diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index dba15ca8e60bc8b0f0d0faf3ca13c2afcd87c231..1dcd9198beb7faf30a820f65e811ba3eb6d0bea0 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -8,6 +8,7 @@ enum { ICQ_EXITED = 1 << 2, + ICQ_DESTROYED = 1 << 3, }; /* diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 3555d54bf79a5a708341b21958362c478edbbf2d..eab2a424cc0e7001fca1d2146b6b965775b43e94 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -6,6 +6,7 @@ #include #include #include +#include struct address_space; struct fiemap_extent_info; @@ -99,6 +100,9 @@ struct iomap_ops { */ int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length, ssize_t written, unsigned flags, struct iomap *iomap); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; /* diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 87994c265bf5068cc31381623629521f59cf4aa2..581570bbad36724d3b3a971a7f819506c8fcd7d6 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -41,6 +41,11 @@ * if the IOMMU page table format is equivalent. */ #define IOMMU_PRIV (1 << 5) +/* Use upstream device's bus attribute */ +#define IOMMU_USE_UPSTREAM_HINT (1 << 6) + +/* Use upstream device's bus attribute with no write-allocate cache policy */ +#define IOMMU_USE_LLC_NWA (1 << 7) struct iommu_ops; struct iommu_group; @@ -50,8 +55,12 @@ struct iommu_domain; struct notifier_block; /* iommu fault flags */ -#define IOMMU_FAULT_READ 0x0 -#define IOMMU_FAULT_WRITE 0x1 +#define IOMMU_FAULT_READ (1 << 0) +#define IOMMU_FAULT_WRITE (1 << 1) +#define IOMMU_FAULT_TRANSLATION (1 << 2) +#define IOMMU_FAULT_PERMISSION (1 << 3) +#define IOMMU_FAULT_EXTERNAL (1 << 4) +#define IOMMU_FAULT_TRANSACTION_STALLED (1 << 5) typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, unsigned long, int, void *); @@ -62,6 +71,10 @@ struct iommu_domain_geometry { bool force_aperture; /* DMA only allowed in mappable range? 
*/ }; +struct iommu_pgtbl_info { + void *ops; +}; + /* Domain feature flags */ #define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */ #define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API @@ -86,6 +99,8 @@ struct iommu_domain_geometry { #define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \ __IOMMU_DOMAIN_DMA_API) + +#define IOMMU_DOMAIN_NAME_LEN 32 struct iommu_domain { unsigned type; const struct iommu_ops *ops; @@ -94,6 +109,8 @@ struct iommu_domain { void *handler_token; struct iommu_domain_geometry geometry; void *iova_cookie; + bool is_debug_domain; + char name[IOMMU_DOMAIN_NAME_LEN]; }; enum iommu_cap { @@ -114,6 +131,11 @@ enum iommu_cap { * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints. * The caller can invoke iommu_domain_get_attr to check if the underlying * iommu implementation supports these constraints. + * + * DOMAIN_ATTR_NO_CFRE + * Some bus implementations may enter a bad state if iommu reports an error + * on context fault. As context faults are not always fatal, this must be + * avoided. */ enum iommu_attr { @@ -124,6 +146,27 @@ enum iommu_attr { DOMAIN_ATTR_FSL_PAMU_ENABLE, DOMAIN_ATTR_FSL_PAMUV1, DOMAIN_ATTR_NESTING, /* two stages of translation */ + DOMAIN_ATTR_PT_BASE_ADDR, + DOMAIN_ATTR_CONTEXT_BANK, + DOMAIN_ATTR_DYNAMIC, + DOMAIN_ATTR_TTBR0, + DOMAIN_ATTR_CONTEXTIDR, + DOMAIN_ATTR_PROCID, + DOMAIN_ATTR_NON_FATAL_FAULTS, + DOMAIN_ATTR_S1_BYPASS, + DOMAIN_ATTR_ATOMIC, + DOMAIN_ATTR_SECURE_VMID, + DOMAIN_ATTR_FAST, + DOMAIN_ATTR_PGTBL_INFO, + DOMAIN_ATTR_USE_UPSTREAM_HINT, + DOMAIN_ATTR_EARLY_MAP, + DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT, + DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT, + DOMAIN_ATTR_BITMAP_IOVA_ALLOCATOR, + DOMAIN_ATTR_USE_LLC_NWA, + DOMAIN_ATTR_FAULT_MODEL_NO_CFRE, + DOMAIN_ATTR_FAULT_MODEL_NO_STALL, + DOMAIN_ATTR_FAULT_MODEL_HUPCF, DOMAIN_ATTR_MAX, }; @@ -155,6 +198,8 @@ struct iommu_resv_region { enum iommu_resv_type type; }; +extern struct dentry *iommu_debugfs_top; + #ifdef CONFIG_IOMMU_API /** @@ -166,11 +211,14 @@ struct iommu_resv_region { * @detach_dev: detach device from an iommu domain * @map: map a physically contiguous memory region to an iommu domain * @unmap: unmap a physically contiguous memory region from an iommu domain + * @map_sg: map a scatter-gather list of physically contiguous memory chunks + * to an iommu domain * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain * @tlb_range_add: Add a given iova range to the flush queue for this domain * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush * queue * @iova_to_phys: translate iova to physical address + * @iova_to_phys_hard: translate iova to physical address using IOMMU hardware * @add_device: add device to iommu grouping * @remove_device: remove device from iommu grouping * @device_group: find iommu group for a particular device @@ -185,6 +233,10 @@ struct iommu_resv_region { * @domain_get_windows: Return the number of windows for a domain * @of_xlate: add OF master IDs to iommu grouping * @pgsize_bitmap: bitmap of all possible supported page sizes + * @trigger_fault: trigger a fault on the device attached to an iommu domain + * @tlbi_domain: Invalidate all TLBs covering an iommu domain + * @enable_config_clocks: Enable all config clocks for this domain's IOMMU + * @disable_config_clocks: Disable all config clocks for this domain's IOMMU */ struct iommu_ops { bool (*capable)(enum iommu_cap); @@ -199,11 +251,15 @@ struct iommu_ops { phys_addr_t paddr, size_t size, int prot); size_t 
(*unmap)(struct iommu_domain *domain, unsigned long iova, size_t size); + size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, int prot); void (*flush_iotlb_all)(struct iommu_domain *domain); void (*iotlb_range_add)(struct iommu_domain *domain, unsigned long iova, size_t size); void (*iotlb_sync)(struct iommu_domain *domain); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); + phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain, + dma_addr_t iova); int (*add_device)(struct device *dev); void (*remove_device)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); @@ -227,10 +283,17 @@ struct iommu_ops { int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count); /* Get the number of windows per domain */ u32 (*domain_get_windows)(struct iommu_domain *domain); + void (*trigger_fault)(struct iommu_domain *domain, unsigned long flags); + void (*tlbi_domain)(struct iommu_domain *domain); + int (*enable_config_clocks)(struct iommu_domain *domain); + void (*disable_config_clocks)(struct iommu_domain *domain); + uint64_t (*iova_to_pte)(struct iommu_domain *domain, + dma_addr_t iova); int (*of_xlate)(struct device *dev, struct of_phandle_args *args); bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); + bool (*is_iova_coherent)(struct iommu_domain *domain, dma_addr_t iova); unsigned long pgsize_bitmap; }; @@ -293,6 +356,8 @@ extern int iommu_attach_device(struct iommu_domain *domain, extern void iommu_detach_device(struct iommu_domain *domain, struct device *dev); extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); +extern size_t iommu_pgsize(unsigned long pgsize_bitmap, + unsigned long addr_merge, size_t size); extern int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, @@ -300,8 +365,17 @@ extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, extern size_t iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, size_t size); extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, - struct scatterlist *sg,unsigned int nents, int prot); + struct scatterlist *sg, unsigned int nents, + int prot); +extern size_t default_iommu_map_sg(struct iommu_domain *domain, + unsigned long iova, + struct scatterlist *sg, unsigned int nents, + int prot); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); +extern phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain, + dma_addr_t iova); +extern bool iommu_is_iova_coherent(struct iommu_domain *domain, + dma_addr_t iova); extern void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token); @@ -351,6 +425,9 @@ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, int prot); extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr); +extern uint64_t iommu_iova_to_pte(struct iommu_domain *domain, + dma_addr_t iova); + extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); @@ -373,11 +450,38 @@ static inline void iommu_tlb_sync(struct iommu_domain *domain) domain->ops->iotlb_sync(domain); } +extern void iommu_trigger_fault(struct iommu_domain *domain, + unsigned long flags); + +extern unsigned long iommu_reg_read(struct iommu_domain *domain, 
+ unsigned long offset); +extern void iommu_reg_write(struct iommu_domain *domain, unsigned long offset, + unsigned long val); + /* PCI device grouping function */ extern struct iommu_group *pci_device_group(struct device *dev); /* Generic device grouping function */ extern struct iommu_group *generic_device_group(struct device *dev); +static inline void iommu_tlbiall(struct iommu_domain *domain) +{ + if (domain->ops->tlbi_domain) + domain->ops->tlbi_domain(domain); +} + +static inline int iommu_enable_config_clocks(struct iommu_domain *domain) +{ + if (domain->ops->enable_config_clocks) + return domain->ops->enable_config_clocks(domain); + return 0; +} + +static inline void iommu_disable_config_clocks(struct iommu_domain *domain) +{ + if (domain->ops->disable_config_clocks) + domain->ops->disable_config_clocks(domain); +} + /** * struct iommu_fwspec - per-device IOMMU instance data * @ops: ops for this device's IOMMU @@ -399,6 +503,7 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, void iommu_fwspec_free(struct device *dev); int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode); +int iommu_is_available(struct device *dev); #else /* CONFIG_IOMMU_API */ @@ -502,6 +607,18 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_ad return 0; } +static inline phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain, + dma_addr_t iova) +{ + return 0; +} + +static inline bool iommu_is_iova_coherent(struct iommu_domain *domain, + dma_addr_t iova) +{ + return 0; +} + static inline void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token) { @@ -661,6 +778,35 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link) { } +static inline void iommu_trigger_fault(struct iommu_domain *domain, + unsigned long flags) +{ +} + +static inline unsigned long iommu_reg_read(struct iommu_domain *domain, + unsigned long offset) +{ + return 0; +} + +static inline void iommu_reg_write(struct iommu_domain *domain, + unsigned long val, unsigned long offset) +{ +} + +static inline void iommu_tlbiall(struct iommu_domain *domain) +{ +} + +static inline int iommu_enable_config_clocks(struct iommu_domain *domain) +{ + return 0; +} + +static inline void iommu_disable_config_clocks(struct iommu_domain *domain) +{ +} + static inline int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, const struct iommu_ops *ops) @@ -684,6 +830,10 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) return NULL; } +static inline int iommu_is_available(struct device *dev) +{ + return -ENODEV; +} #endif /* CONFIG_IOMMU_API */ #ifdef CONFIG_IOMMU_DEBUGFS diff --git a/include/linux/ioport.h b/include/linux/ioport.h index da0ebaec25f0a1648d36e5edaf7fe5e9c0d5493b..a86dc4d7ff22e997ecb3da9a7c4fd676ddbd772c 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -12,6 +12,7 @@ #ifndef __ASSEMBLY__ #include #include +#include /* * Resources are tree-like, allowing * nesting etc.. 
@@ -23,6 +24,11 @@ struct resource { unsigned long flags; unsigned long desc; struct resource *parent, *sibling, *child; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /* diff --git a/include/linux/iova.h b/include/linux/iova.h index 84fbe73d2ec08adabd746c2918733ecb7c1ccba3..8c48bfaad8a7739ed6c961ac17a1f000d26e22dc 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -74,6 +74,7 @@ struct iova_domain { struct rb_node *cached32_node; /* Save last 32-bit alloced node */ unsigned long granule; /* pfn granularity for this domain */ unsigned long start_pfn; /* Lower limit for this domain */ + unsigned long end_pfn; /* Upper limit for this domain */ unsigned long dma_32bit_pfn; struct iova anchor; /* rbtree lookup anchor */ struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */ @@ -96,6 +97,7 @@ struct iova_domain { flush-queues */ atomic_t fq_timer_on; /* 1 when timer is active, 0 when not */ + bool best_fit; }; static inline unsigned long iova_size(struct iova *iova) diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 4a0fc3bce67beb35bfe374fee3c43f8f94f07c91..a6a4f6bdabaa75bc523a627925a9033850787952 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -3,6 +3,7 @@ #define _IPV6_H #include +#include #define ipv6_optlen(p) (((p)->hdrlen+1) << 3) #define ipv6_authlen(p) (((p)->hdrlen+2) << 2) @@ -77,6 +78,11 @@ struct ipv6_devconf { __s32 ndisc_tclass; struct ctl_table_header *sysctl_header; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; struct ipv6_params { diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index a78afdfb6b77c8e4aaf292275eaf00863875572d..cf27ed81a31ee52cba795a49d7c8934482757129 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -35,6 +35,7 @@ #include #include #include +#include struct device_node; struct irq_domain; @@ -174,6 +175,11 @@ struct irq_domain { struct dentry *debugfs_file; #endif + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); + /* reverse map data. 
The linear map gets appended to the irq_domain */ irq_hw_number_t hwirq_max; unsigned int revmap_direct_max_irq; diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index fa928242567db30769839ac8738be5dc58e372ab..6f0d546b4f498707bdb1f2a546abd9a856cb22b9 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -8,6 +8,7 @@ #include #include #include +#include #include /* for HZ */ #include @@ -59,9 +60,6 @@ extern int register_refined_jiffies(long clock_tick_rate); -/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ -#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ) - /* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */ #define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index f6f94e54ab966d1b9080bf43845c2aaa85ea2ee1..7a4e8e20ef178c4b721a2b863939b6206b597605 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -2,7 +2,6 @@ #ifndef _LINUX_KERNEL_H #define _LINUX_KERNEL_H - #include #include #include @@ -15,34 +14,7 @@ #include #include #include - -#define USHRT_MAX ((u16)(~0U)) -#define SHRT_MAX ((s16)(USHRT_MAX>>1)) -#define SHRT_MIN ((s16)(-SHRT_MAX - 1)) -#define INT_MAX ((int)(~0U>>1)) -#define INT_MIN (-INT_MAX - 1) -#define UINT_MAX (~0U) -#define LONG_MAX ((long)(~0UL>>1)) -#define LONG_MIN (-LONG_MAX - 1) -#define ULONG_MAX (~0UL) -#define LLONG_MAX ((long long)(~0ULL>>1)) -#define LLONG_MIN (-LLONG_MAX - 1) -#define ULLONG_MAX (~0ULL) -#define SIZE_MAX (~(size_t)0) -#define PHYS_ADDR_MAX (~(phys_addr_t)0) - -#define U8_MAX ((u8)~0U) -#define S8_MAX ((s8)(U8_MAX>>1)) -#define S8_MIN ((s8)(-S8_MAX - 1)) -#define U16_MAX ((u16)~0U) -#define S16_MAX ((s16)(U16_MAX>>1)) -#define S16_MIN ((s16)(-S16_MAX - 1)) -#define U32_MAX ((u32)~0U) -#define S32_MAX ((s32)(U32_MAX>>1)) -#define S32_MIN ((s32)(-S32_MAX - 1)) -#define U64_MAX ((u64)~0ULL) -#define S64_MAX ((s64)(U64_MAX>>1)) -#define S64_MIN ((s64)(-S64_MAX - 1)) +#include #define STACK_MAGIC 0xdeadbeef @@ -323,6 +295,7 @@ static inline void might_fault(void) { } #endif extern struct atomic_notifier_head panic_notifier_list; +extern void (*vendor_panic_cb)(u64 sp); extern long (*panic_blink)(int state); __printf(1, 2) void panic(const char *fmt, ...) 
__noreturn __cold; diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index b85cc8ebc39d2a91bfb9fed30749d0f77e04fee5..935e4b1c578a5b776f05d6305979090059cbf965 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -17,6 +17,7 @@ #include #include #include +#include struct file; struct dentry; @@ -178,6 +179,11 @@ struct kernfs_syscall_ops { const char *new_name); int (*show_path)(struct seq_file *sf, struct kernfs_node *kn, struct kernfs_root *root); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; struct kernfs_root { @@ -271,6 +277,9 @@ struct kernfs_ops { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lock_class_key lockdep_key; #endif + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; #ifdef CONFIG_KERNFS diff --git a/include/linux/key-type.h b/include/linux/key-type.h index d3c5ae8ad498c64886bc1cd83551caba941a4075..3341ddac2348b5baaf5a60a060c46f70bd3a060c 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -125,7 +125,7 @@ struct key_type { * much is copied into the buffer * - shouldn't do the copy if the buffer is NULL */ - long (*read)(const struct key *key, char __user *buffer, size_t buflen); + long (*read)(const struct key *key, char *buffer, size_t buflen); /* handle request_key() for this type instead of invoking * /sbin/request-key (optional) diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h index cd65bea927db4d90b410c3cfbfe1dabf55127fed..f5e0eed468b006acd2e24f3c3217a8e7bd800f2b 100644 --- a/include/linux/keyslot-manager.h +++ b/include/linux/keyslot-manager.h @@ -58,6 +58,9 @@ struct keyslot_manager *keyslot_manager_create( const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX], void *ll_priv_data); +void keyslot_manager_set_max_dun_bytes(struct keyslot_manager *ksm, + unsigned int max_dun_bytes); + int keyslot_manager_get_slot_for_key(struct keyslot_manager *ksm, const struct blk_crypto_key *key); @@ -67,6 +70,7 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot); bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm, enum blk_crypto_mode_num crypto_mode, + unsigned int dun_bytes, unsigned int data_unit_size, bool is_hw_wrapped_key); diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 9744ae93a6f2fed361c56c19585464a40238ba69..d0034c374a8b32ee9a32897ec75d62d40d41cd89 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -27,6 +27,7 @@ #include #include #include +#include #define UEVENT_HELPER_PATH_LEN 256 #define UEVENT_NUM_ENVP 64 /* number of env pointers */ @@ -78,6 +79,11 @@ struct kobject { unsigned int state_add_uevent_sent:1; unsigned int state_remove_uevent_sent:1; unsigned int uevent_suppress:1; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; extern __printf(2, 3) @@ -143,6 +149,11 @@ struct kobj_type { const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj); const void *(*namespace)(struct kobject *kobj); void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; struct kobj_uevent_env { @@ -194,6 +205,11 @@ struct kset { spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } __randomize_layout; 
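
struct kset above ends with the same ANDROID_KABI_RESERVE() padding that this patch adds to most exported structures. The macro itself comes from the new <linux/android_kabi.h> header rather than from any hunk shown here, so the following is only an illustrative sketch of the mechanism, using a renamed macro to avoid implying the exact definition.

/*
 * Illustrative only; the real macro lives in <linux/android_kabi.h>.
 * Each reservation expands to an unused 64-bit member so that struct
 * size and member offsets stay stable once the ABI is frozen, and a
 * later change can reuse a slot instead of growing the structure.
 */
#include <linux/types.h>

#define EXAMPLE_KABI_RESERVE(n)	u64 example_kabi_reserved##n

struct example_ops {
	int (*probe)(void *ctx);

	EXAMPLE_KABI_RESERVE(1);	/* placeholder for future members */
	EXAMPLE_KABI_RESERVE(2);
};
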
extern void kset_init(struct kset *kset); diff --git a/include/linux/ktime.h b/include/linux/ktime.h index b2bb44f87f5a3edb6ee6f179c83fcde42a363fe5..1fcfce97a020284093c28bcef815ec12ec32cc72 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -253,14 +253,7 @@ static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, } } -/* - * The resolution of the clocks. The resolution value is returned in - * the clock_getres() system call to give application programmers an - * idea of the (in)accuracy of timers. Timer values are rounded up to - * this resolution values. - */ -#define LOW_RES_NSEC TICK_NSEC -#define KTIME_LOW_RES (LOW_RES_NSEC) +#include static inline ktime_t ns_to_ktime(u64 ns) { diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 0f99ecc01bc7d5128e5c88b97bb4131a0bf0cda7..92c6f80e6327d8b6c6d3c7406764b206e7943a7a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -206,6 +206,32 @@ enum { READING_SHADOW_PAGE_TABLES, }; +#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA) + +struct kvm_host_map { + /* + * Only valid if the 'pfn' is managed by the host kernel (i.e. There is + * a 'struct page' for it. When using mem= kernel parameter some memory + * can be used as guest memory but they are not managed by host + * kernel). + * If 'pfn' is not managed by the host kernel, this field is + * initialized to KVM_UNMAPPED_PAGE. + */ + struct page *page; + void *hva; + kvm_pfn_t pfn; + kvm_pfn_t gfn; +}; + +/* + * Used to check if the mapping is valid or not. Never use 'kvm_host_map' + * directly to check for that. + */ +static inline bool kvm_vcpu_mapped(struct kvm_host_map *map) +{ + return !!map->hva; +} + /* * Sometimes a large or cross-page mmio needs to be broken up into separate * exits for userspace servicing. 
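
The kvm_host_map structure and kvm_vcpu_mapped() helper above give callers a uniform way to consume guest pages whether or not they are backed by a struct page; the map/unmap functions themselves are declared a little further down in this hunk. A hedged sketch of the check a consumer would make (the function name is hypothetical):

#include <linux/kvm_host.h>

/*
 * True only when the map is live and the pfn is managed by the host
 * kernel; otherwise map->page is KVM_UNMAPPED_PAGE (e.g. memory carved
 * out with mem=) and callers must stick to the hva/pfn fields.
 */
static bool my_map_has_struct_page(struct kvm_host_map *map)
{
	if (!kvm_vcpu_mapped(map))
		return false;

	return map->page != KVM_UNMAPPED_PAGE;
}
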
@@ -682,6 +708,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn); void kvm_set_pfn_accessed(kvm_pfn_t pfn); void kvm_get_pfn(kvm_pfn_t pfn); +void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache); int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len); int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, @@ -711,7 +738,13 @@ struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); +int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map); +int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, + struct gfn_to_pfn_cache *cache, bool atomic); struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn); +void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty); +int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, + struct gfn_to_pfn_cache *cache, bool dirty, bool atomic); unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, @@ -966,7 +999,7 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn) start = slot + 1; } - if (gfn >= memslots[start].base_gfn && + if (start < slots->used_slots && gfn >= memslots[start].base_gfn && gfn < memslots[start].base_gfn + memslots[start].npages) { atomic_set(&slots->lru_slot, start); return &memslots[start]; diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 8bf259dae9f6cc278bb2c8623507c452f8d61694..a38729c8296f424279153af67ee558e9adafb635 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -32,7 +32,7 @@ struct kvm_memslots; enum kvm_mr_change; -#include +#include /* * Address types: @@ -63,4 +63,11 @@ struct gfn_to_hva_cache { struct kvm_memory_slot *memslot; }; +struct gfn_to_pfn_cache { + u64 generation; + gfn_t gfn; + kvm_pfn_t pfn; + bool dirty; +}; + #endif /* __KVM_TYPES_H__ */ diff --git a/include/linux/limits.h b/include/linux/limits.h new file mode 100644 index 0000000000000000000000000000000000000000..7fc497ee139362cb4a93bbc00a82402e519de26d --- /dev/null +++ b/include/linux/limits.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_LIMITS_H +#define _LINUX_LIMITS_H + +#include +#include +#include + +#define SIZE_MAX (~(size_t)0) +#define PHYS_ADDR_MAX (~(phys_addr_t)0) + +#define U8_MAX ((u8)~0U) +#define S8_MAX ((s8)(U8_MAX >> 1)) +#define S8_MIN ((s8)(-S8_MAX - 1)) +#define U16_MAX ((u16)~0U) +#define S16_MAX ((s16)(U16_MAX >> 1)) +#define S16_MIN ((s16)(-S16_MAX - 1)) +#define U32_MAX ((u32)~0U) +#define S32_MAX ((s32)(U32_MAX >> 1)) +#define S32_MIN ((s32)(-S32_MAX - 1)) +#define U64_MAX ((u64)~0ULL) +#define S64_MAX ((s64)(U64_MAX >> 1)) +#define S64_MIN ((s64)(-S64_MAX - 1)) + +#endif /* _LINUX_LIMITS_H */ diff --git a/include/linux/math64.h b/include/linux/math64.h index bb2c84afb80c6c2246d233f7acab29c33bdb372e..38e4db5e0c67821cb85c1dfdd63e50369966ddad 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -3,6 +3,7 @@ #define _LINUX_MATH64_H #include +#include #include #if BITS_PER_LONG == 64 @@ -142,25 +143,6 @@ static inline s64 div_s64(s64 dividend, s32 divisor) u32 iter_div_u64_rem(u64 dividend, 
u32 divisor, u64 *remainder); -static __always_inline u32 -__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) -{ - u32 ret = 0; - - while (dividend >= divisor) { - /* The following asm() prevents the compiler from - optimising this loop into a modulo operation. */ - asm("" : "+rm"(dividend)); - - dividend -= divisor; - ret++; - } - - *remainder = dividend; - - return ret; -} - #ifndef mul_u32_u32 /* * Many a GCC version messes this up and generates a 64x64 mult :-( diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 76b76b6aa83d0634af2ba7999e1d925683456ab9..b87b1569d15b5c190d6a02d8cef09cdc77b3ae5d 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -672,7 +672,14 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 swp[0x1]; u8 swp_csum[0x1]; u8 swp_lso[0x1]; - u8 reserved_at_23[0xd]; + u8 cqe_checksum_full[0x1]; + u8 tunnel_stateless_geneve_tx[0x1]; + u8 tunnel_stateless_mpls_over_udp[0x1]; + u8 tunnel_stateless_mpls_over_gre[0x1]; + u8 tunnel_stateless_vxlan_gpe[0x1]; + u8 tunnel_stateless_ipv4_over_vxlan[0x1]; + u8 tunnel_stateless_ip_over_ip[0x1]; + u8 reserved_at_2a[0x6]; u8 max_vxlan_udp_ports[0x8]; u8 reserved_at_38[0x6]; u8 max_geneve_opt_len[0x1]; diff --git a/include/linux/mm.h b/include/linux/mm.h index c06305ce27d51f21a87685323c86e5121721dcb7..20773ec60a81144c2939fce6f8d5961ca2828b0b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -26,6 +26,7 @@ #include #include #include +#include struct mempolicy; struct anon_vma; @@ -461,6 +462,11 @@ struct vm_operations_struct { */ struct page *(*find_special_page)(struct vm_area_struct *vma, unsigned long addr); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) @@ -2239,7 +2245,6 @@ extern void zone_pcp_reset(struct zone *zone); /* page_alloc.c */ extern int min_free_kbytes; -extern int watermark_boost_factor; extern int watermark_scale_factor; /* nommu.c */ diff --git a/include/linux/mm_event.h b/include/linux/mm_event.h new file mode 100644 index 0000000000000000000000000000000000000000..71962f3955f4954ce4e1b3537b6c3b40b89f8345 --- /dev/null +++ b/include/linux/mm_event.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MM_EVENT_H +#define _LINUX_MM_EVENT_H + +enum mm_event_type { + MM_MIN_FAULT = 0, + MM_MAJ_FAULT = 1, + MM_READ_IO = 2, + MM_COMPACTION = 3, + MM_RECLAIM = 4, + MM_SWP_FAULT = 5, + MM_KERN_ALLOC = 6, + MM_TYPE_NUM = 7, +}; + +struct mm_event_task { + unsigned int count; + unsigned int max_lat; + u64 accm_lat; +} __attribute__ ((packed)); + +#endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 3f6ab003e4bf0264344d5ffe6a8122532b3729d6..718ba2e338979d61745a019036012c8804dad92a 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -14,6 +14,7 @@ #include #include #include +#include #include @@ -335,6 +336,11 @@ struct vm_area_struct { struct mempolicy *vm_policy; /* NUMA policy for the VMA */ #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } __randomize_layout; struct core_thread { diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 134a6483347a1e565d91c812c1e1212e989463bb..3485912a72bdbce8b0aec018eb9f5ae463174278 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -168,8 
+168,25 @@ struct mmc_request { bool cap_cmd_during_tfr; int tag; +#ifdef CONFIG_MMC_CRYPTO + int crypto_key_slot; + u64 data_unit_num; + const struct blk_crypto_key *crypto_key; +#endif }; +#ifdef CONFIG_MMC_CRYPTO +static inline bool mmc_request_crypto_enabled(const struct mmc_request *mrq) +{ + return mrq->crypto_key != NULL; +} +#else +static inline bool mmc_request_crypto_enabled(const struct mmc_request *mrq) +{ + return false; +} +#endif + struct mmc_card; void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq); diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 7e8e5b20e82b06085f22e3c0813d3369f93cce64..03f6572f73e64d1751059ff802e216bdae5a9460 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -368,6 +368,7 @@ struct mmc_host { #define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */ #define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */ #define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */ +#define MMC_CAP2_CRYPTO (1 << 27) /* Host supports inline encryption */ int fixed_drv_type; /* fixed driver type for non-removable media */ @@ -459,6 +460,9 @@ struct mmc_host { int cqe_qdepth; bool cqe_enabled; bool cqe_on; +#ifdef CONFIG_MMC_CRYPTO + struct keyslot_manager *ksm; +#endif /* CONFIG_MMC_CRYPTO */ unsigned long private[0] ____cacheline_aligned; }; diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 133ba78820ee5f2da69b865df43689c0a736ba3e..bf1a874287b52391aafe0c9062d625d3bee86f20 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -7,6 +7,7 @@ #include #include #include +#include struct mmu_notifier; struct mmu_notifier_ops; @@ -188,6 +189,11 @@ struct mmu_notifier_ops { */ void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /* @@ -204,6 +210,9 @@ struct mmu_notifier_ops { struct mmu_notifier { struct hlist_node hlist; const struct mmu_notifier_ops *ops; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; static inline int mm_has_notifiers(struct mm_struct *mm) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 70412ab9a706027e1697d0fbffca4cc00b0980e3..b4f3370f91e0c04c9a3ab9bfc741d3e0f7aa1e4c 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -18,6 +18,7 @@ #include #include #include +#include #include /* Free memory management - zoned buddy allocator. 
*/ @@ -188,6 +189,8 @@ enum node_stat_item { NR_WRITTEN, /* page writings since bootup */ NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */ NR_UNRECLAIMABLE_PAGES, + NR_ION_HEAP, + NR_ION_HEAP_POOL, NR_VM_NODE_STAT_ITEMS }; @@ -274,10 +277,9 @@ enum zone_watermarks { NR_WMARK }; -#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost) -#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost) -#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost) -#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost) +#define min_wmark_pages(z) (z->watermark[WMARK_MIN]) +#define low_wmark_pages(z) (z->watermark[WMARK_LOW]) +#define high_wmark_pages(z) (z->watermark[WMARK_HIGH]) struct per_cpu_pages { int count; /* number of pages in the list */ @@ -368,8 +370,7 @@ struct zone { /* Read-mostly fields */ /* zone watermarks, access with *_wmark_pages(zone) macros */ - unsigned long _watermark[NR_WMARK]; - unsigned long watermark_boost; + unsigned long watermark[NR_WMARK]; unsigned long nr_reserved_highatomic; @@ -495,8 +496,6 @@ struct zone { unsigned long compact_cached_free_pfn; /* pfn where async and sync compaction migration scanner should start */ unsigned long compact_cached_migrate_pfn[2]; - unsigned long compact_init_migrate_pfn; - unsigned long compact_init_free_pfn; #endif #ifdef CONFIG_COMPACTION @@ -521,6 +520,11 @@ struct zone { /* Zone statistics */ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS]; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } ____cacheline_internodealigned_in_smp; enum pgdat_flags { @@ -899,8 +903,6 @@ static inline int is_highmem(struct zone *zone) struct ctl_table; int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); -int watermark_boost_factor_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 867db9b9384c09b534f4189709c5623d5d13191c..bd297511becfcc85089c51a5ca4213b74de16772 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -299,7 +299,7 @@ struct pcmcia_device_id { #define INPUT_DEVICE_ID_LED_MAX 0x0f #define INPUT_DEVICE_ID_SND_MAX 0x07 #define INPUT_DEVICE_ID_FF_MAX 0x7f -#define INPUT_DEVICE_ID_SW_MAX 0x0f +#define INPUT_DEVICE_ID_SW_MAX 0x20 #define INPUT_DEVICE_ID_PROP_MAX 0x1f #define INPUT_DEVICE_ID_MATCH_BUS 1 diff --git a/include/linux/module.h b/include/linux/module.h index 1c513aefb7880aed42077116105b30a7cf7b1f6c..e4bb2855f0c7d963f0174a9d50d3626b8c9c75e9 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -489,6 +490,10 @@ struct module { struct error_injection_entry *ei_funcs; unsigned int num_ei_funcs; #endif + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } ____cacheline_aligned __randomize_layout; #ifndef MODULE_ARCH_INIT #define MODULE_ARCH_INIT {} diff --git a/include/linux/mount.h b/include/linux/mount.h index 66456505c7019c3264a31b57e7255f564b311e01..adcc8f01c3162e3a1f8839b2a70dcc433e01c217 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -16,6 +16,7 @@ 
#include #include #include +#include struct super_block; struct vfsmount; @@ -68,6 +69,10 @@ struct vfsmount { struct dentry *mnt_root; /* root of the mounted tree */ struct super_block *mnt_sb; /* pointer to superblock */ int mnt_flags; + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); void *data; } __randomize_layout; diff --git a/include/linux/net.h b/include/linux/net.h index e0930678c8bf6e6061020176e8013b6e33d8264d..0f6464129e60a26b6cf4a217bad9e1ad6a6e4cea 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -25,6 +25,7 @@ #include #include #include +#include #include @@ -198,6 +199,11 @@ struct proto_ops { int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg, size_t size); int (*set_rcvlowat)(struct sock *sk, int val); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; #define DECLARE_SOCKADDR(type, dst, src) \ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 37d383bf27eb89a461840076441886c174e21809..07ffa735e5771d33c0b63a9db40acf4e0c7150c8 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -52,6 +52,7 @@ #include #include #include +#include struct netpoll_info; struct device; @@ -274,6 +275,9 @@ struct header_ops { const struct net_device *dev, const unsigned char *haddr); bool (*validate)(const char *ll_header, unsigned int len); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; /* These flag bits are private to the generic network queueing @@ -339,6 +343,11 @@ struct napi_struct { struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; enum { @@ -598,6 +607,11 @@ struct netdev_queue { #ifdef CONFIG_BQL struct dql dql; #endif + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } ____cacheline_aligned_in_smp; extern int sysctl_fb_tunnels_only_for_init_net; @@ -712,6 +726,11 @@ struct netdev_rx_queue { struct kobject kobj; struct net_device *dev; struct xdp_rxq_info xdp_rxq; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } ____cacheline_aligned_in_smp; /* @@ -888,6 +907,11 @@ struct xfrmdev_ops { bool (*xdo_dev_offload_ok) (struct sk_buff *skb, struct xfrm_state *x); void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; #endif @@ -910,6 +934,10 @@ struct tlsdev_ops { enum tls_offload_ctx_dir direction); void (*tls_dev_resync_rx)(struct net_device *netdev, struct sock *sk, u32 seq, u64 rcd_sn); + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; #endif @@ -1410,6 +1438,15 @@ struct net_device_ops { u32 flags); int (*ndo_xsk_async_xmit)(struct net_device *dev, u32 queue_id); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); + ANDROID_KABI_RESERVE(5); + ANDROID_KABI_RESERVE(6); + ANDROID_KABI_RESERVE(7); + ANDROID_KABI_RESERVE(8); }; /** @@ -2032,6 +2069,16 @@ struct net_device { struct lock_class_key *qdisc_running_key; bool proto_down; unsigned wol_enabled:1; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); + ANDROID_KABI_RESERVE(5); + 
ANDROID_KABI_RESERVE(6); + ANDROID_KABI_RESERVE(7); + ANDROID_KABI_RESERVE(8); + }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -2347,6 +2394,11 @@ struct packet_type { struct sock *sk); void *af_packet_priv; struct list_head list; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; struct offload_callbacks { @@ -2956,6 +3008,9 @@ struct softnet_data { unsigned int processed; unsigned int time_squeeze; unsigned int received_rps; + /* unused partner variable for ABI alignment */ + unsigned int gro_coalesced; + #ifdef CONFIG_RPS struct softnet_data *rps_ipi_list; #endif diff --git a/include/linux/notifier.h b/include/linux/notifier.h index f35c7bf7614302ee51f0896258a3dfc08d5224f8..0096a05395e380a35fe25a6329e05953eb66ab66 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -122,8 +122,7 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); #ifdef CONFIG_TREE_SRCU #define _SRCU_NOTIFIER_HEAD(name, mod) \ - static DEFINE_PER_CPU(struct srcu_data, \ - name##_head_srcu_data); \ + static DEFINE_PER_CPU(struct srcu_data, name##_head_srcu_data); \ mod struct srcu_notifier_head name = \ SRCU_NOTIFIER_INIT(name, name##_head_srcu_data) diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 2f3ae41c212dccbcfb3e31d3d21b8121228546db..496ff759f84c69c8216315570c6ef78ae347582b 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -282,8 +282,6 @@ struct nvme_fc_remote_port { * * Host/Initiator Transport Entrypoints/Parameters: * - * @module: The LLDD module using the interface - * * @localport_delete: The LLDD initiates deletion of a localport via * nvme_fc_deregister_localport(). However, the teardown is * asynchronous. This routine is called upon the completion of the @@ -397,8 +395,6 @@ struct nvme_fc_remote_port { * Value is Mandatory. Allowed to be zero. */ struct nvme_fc_port_template { - struct module *module; - /* initiator-based functions */ void (*localport_delete)(struct nvme_fc_local_port *); void (*remoteport_delete)(struct nvme_fc_remote_port *); diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index 4e85447f78602754f56b988f576beb8ab8ab71bf..0389fe00b17748f2568c3dc059f4d412e3d3007d 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -55,6 +55,8 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, int nvmem_device_cell_write(struct nvmem_device *nvmem, struct nvmem_cell_info *info, void *buf); +const char *nvmem_dev_name(struct nvmem_device *nvmem); + #else static inline struct nvmem_cell *nvmem_cell_get(struct device *dev, @@ -143,6 +145,12 @@ static inline int nvmem_device_write(struct nvmem_device *nvmem, { return -ENOSYS; } + +static inline const char *nvmem_dev_name(struct nvmem_device *nvmem) +{ + return NULL; +} + #endif /* CONFIG_NVMEM */ #if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 24def6ad09bb0b7eae0636456dcb14af4f161203..c4b8430181dc080b2fc7bcc01f5251e5c56fbc98 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -67,6 +67,25 @@ struct nvmem_config { struct device *base_dev; }; +/** + * struct nvmem_cell_table - NVMEM cell definitions for given provider + * + * @nvmem_name: Provider name. + * @cells: Array of cell definitions. + * @ncells: Number of cell definitions in the array. + * @node: List node. 
+ * + * This structure together with related helper functions is provided for users + * that can't access the nvmem provider structure but wish to register + * cell definitions for it, e.g. board files registering an EEPROM device. + */ +struct nvmem_cell_table { + const char *nvmem_name; + const struct nvmem_cell_info *cells; + size_t ncells; + struct list_head node; +}; + #if IS_ENABLED(CONFIG_NVMEM) struct nvmem_device *nvmem_register(const struct nvmem_config *cfg); @@ -77,9 +96,9 @@ struct nvmem_device *devm_nvmem_register(struct device *dev, int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem); -int nvmem_add_cells(struct nvmem_device *nvmem, - const struct nvmem_cell_info *info, - int ncells); +void nvmem_add_cell_table(struct nvmem_cell_table *table); +void nvmem_del_cell_table(struct nvmem_cell_table *table); + #else static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c) @@ -105,12 +124,8 @@ devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) } -static inline int nvmem_add_cells(struct nvmem_device *nvmem, - const struct nvmem_cell_info *info, - int ncells) -{ - return -ENOSYS; -} +static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {} +static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {} #endif /* CONFIG_NVMEM */ #endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index f41f3c463307d3b107b376c758b4bd1075697acb..280bffadd0b4ee17be91bf6ef3012d7b9a6410fd 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -48,6 +48,8 @@ extern char __dtb_end[]; extern u64 of_flat_dt_translate_address(unsigned long node); extern void of_fdt_limit_memory(int limit); extern int of_fdt_get_ddrtype(void); +extern int of_fdt_get_ddrrank(int channel); +extern int of_fdt_get_ddrhbb(int channel, int rank); #endif /* CONFIG_OF_FLATTREE */ #ifdef CONFIG_OF_EARLY_FLATTREE diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 78cac3fc3ce544fba81c33ba1deaff3406ba7574..fed072d472fab9dcc77cb31826edefaa2542f822 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -461,9 +461,9 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma, return pgoff; } -extern void __lock_page(struct page *page); -extern int __lock_page_killable(struct page *page); -extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, +extern void __sched __lock_page(struct page *page); +extern int __sched __lock_page_killable(struct page *page); +extern int __sched __lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags); extern void unlock_page(struct page *page); @@ -476,7 +476,7 @@ static inline int trylock_page(struct page *page) /* * lock_page may only be called if we have the page's inode pinned. */ -static inline void lock_page(struct page *page) +static inline __sched void lock_page(struct page *page) { might_sleep(); if (!trylock_page(page)) @@ -488,7 +488,7 @@ static inline void lock_page(struct page *page) * signals. It returns 0 if it locked the page and -EINTR if it was * killed while waiting. */ -static inline int lock_page_killable(struct page *page) +static inline __sched int lock_page_killable(struct page *page) { might_sleep(); if (!trylock_page(page)) @@ -503,8 +503,9 @@ static inline int lock_page_killable(struct page *page) * Return value and mmap_sem implications depend on flags; see * __lock_page_or_retry().
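The nvmem_cell_table and nvmem_add_cell_table()/nvmem_del_cell_table() additions above replace nvmem_add_cells() for code that has no handle on the provider's nvmem_device, e.g. board files. A minimal sketch follows; the provider name "ee0", the cell layout and the nvmem_cell_info field names used here are assumptions for illustration, not taken from a real board.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>

/* Assumed cell layout for a board EEPROM whose provider registers as "ee0". */
static const struct nvmem_cell_info board_ee_cells[] = {
	{
		.name	= "mac-address",
		.offset	= 0x20,
		.bytes	= 6,
	},
};

static struct nvmem_cell_table board_ee_cell_table = {
	.nvmem_name	= "ee0",
	.cells		= board_ee_cells,
	.ncells		= ARRAY_SIZE(board_ee_cells),
};

static int __init board_nvmem_cells_init(void)
{
	/* The cells are attached once a provider named "ee0" registers. */
	nvmem_add_cell_table(&board_ee_cell_table);
	return 0;
}

A matching nvmem_del_cell_table() call on teardown removes the definitions again.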
*/ -static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, - unsigned int flags) +static inline __sched int lock_page_or_retry(struct page *page, + struct mm_struct *mm, + unsigned int flags) { might_sleep(); return trylock_page(page) || __lock_page_or_retry(page, mm, flags); @@ -514,8 +515,8 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc., * and should not be used directly. */ -extern void wait_on_page_bit(struct page *page, int bit_nr); -extern int wait_on_page_bit_killable(struct page *page, int bit_nr); +extern void __sched wait_on_page_bit(struct page *page, int bit_nr); +extern int __sched wait_on_page_bit_killable(struct page *page, int bit_nr); /* * Wait for a page to be unlocked. @@ -524,13 +525,13 @@ extern int wait_on_page_bit_killable(struct page *page, int bit_nr); * ie with increased "page->count" so that the page won't * go away during the wait.. */ -static inline void wait_on_page_locked(struct page *page) +static inline __sched void wait_on_page_locked(struct page *page) { if (PageLocked(page)) wait_on_page_bit(compound_head(page), PG_locked); } -static inline int wait_on_page_locked_killable(struct page *page) +static inline __sched int wait_on_page_locked_killable(struct page *page) { if (!PageLocked(page)) return 0; @@ -540,7 +541,7 @@ static inline int wait_on_page_locked_killable(struct page *page) /* * Wait for a page to complete writeback */ -static inline void wait_on_page_writeback(struct page *page) +static inline __sched void wait_on_page_writeback(struct page *page) { if (PageWriteback(page)) wait_on_page_bit(page, PG_writeback); diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 37dab81169015c5bda77c34a843c6c565806bcaa..931fda3e5e0d9b10dfdb43ee07efee46ae58912b 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -69,6 +69,7 @@ struct pci_epc_ops { * @bitmap: bitmap to manage the PCI address space * @pages: number of bits representing the address region * @page_size: size of each page + * @lock: mutex to protect bitmap */ struct pci_epc_mem { phys_addr_t phys_base; @@ -76,6 +77,8 @@ struct pci_epc_mem { unsigned long *bitmap; size_t page_size; int pages; + /* mutex to protect against concurrent access for memory allocation*/ + struct mutex lock; }; /** diff --git a/include/linux/pci.h b/include/linux/pci.h index f00ce74f3d546f9d9bf00872fc95dbfbf67e07d9..01dff69a3a215888b6169aff9d2820052fd91413 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -35,6 +35,7 @@ #include #include +#include /* * The PCI interface treats multi-function devices as independent @@ -447,6 +448,11 @@ struct pci_dev { char *driver_override; /* Driver name to force a match */ unsigned long priv_flags; /* Private flags for the PCI driver */ + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; static inline struct pci_dev *pci_physfn(struct pci_dev *dev) @@ -577,6 +583,11 @@ struct pci_bus { struct bin_attribute *legacy_io; /* Legacy I/O for this bus */ struct bin_attribute *legacy_mem; /* Legacy mem */ unsigned int is_added:1; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; #define to_pci_bus(n) container_of(n, struct pci_bus, dev) @@ -768,6 +779,11 @@ struct pci_driver { const struct attribute_group **groups; struct device_driver driver; struct pci_dynids dynids; + + ANDROID_KABI_RESERVE(1); 
+ ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 4f052496cdfd79b4db4ac104384b212c5790f1ad..0a4f54dd4737b7e3124e9668550811066d8c6616 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -78,9 +78,9 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc) */ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) { - s64 ret = fbc->count; + /* Prevent reloads of fbc->count */ + s64 ret = READ_ONCE(fbc->count); - barrier(); /* Prevent reloads of fbc->count */ if (ret >= 0) return ret; return 0; diff --git a/include/linux/phy.h b/include/linux/phy.h index 42766e7179d383dfcb6f618760cdb1171c5a36af..8b1371d01ca0d69f203b5ba7855e4906084657fa 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -25,6 +25,7 @@ #include #include #include +#include #include @@ -481,6 +482,11 @@ struct phy_device { void (*phy_link_change)(struct phy_device *, bool up, bool do_carrier); void (*adjust_link)(struct net_device *dev); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; #define to_phy_device(d) container_of(to_mdio_device(d), \ struct phy_device, mdio) @@ -660,6 +666,10 @@ struct phy_driver { struct ethtool_tunable *tuna, const void *data); int (*set_loopback)(struct phy_device *dev, bool enable); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + }; #define to_phy_driver(d) container_of(to_mdio_common_driver(d), \ struct phy_driver, mdiodrv) diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 697d8566e29b6047a17a464e1e5363bd5822133e..2dae1837a21a06099e22b3181ae9c18b7a5ed14c 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -59,6 +59,7 @@ enum { POWER_SUPPLY_HEALTH_COLD, POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE, POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE, + POWER_SUPPLY_HEALTH_OVERCURRENT, POWER_SUPPLY_HEALTH_WARM, POWER_SUPPLY_HEALTH_COOL, POWER_SUPPLY_HEALTH_HOT, @@ -387,6 +388,8 @@ enum power_supply_property { POWER_SUPPLY_PROP_IRQ_STATUS, POWER_SUPPLY_PROP_PARALLEL_OUTPUT_MODE, POWER_SUPPLY_PROP_ALIGNMENT, + POWER_SUPPLY_PROP_MOISTURE_DETECTION_ENABLE, + POWER_SUPPLY_PROP_FG_TYPE, /* Local extensions of type int64_t */ POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT, POWER_SUPPLY_PROP_CHARGE_CHARGER_STATE, diff --git a/include/linux/printk.h b/include/linux/printk.h index cf3eccfe1543621cc3d91fec644a63bb6b1dc0f7..c812ceed6cfdd2bb6851ee1fbedd3cc2203178bb 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -9,6 +9,7 @@ #include extern const char linux_banner[]; +extern const char *linux_banner_ptr; extern const char linux_proc_banner[]; #define PRINTK_MAX_SINGLE_HEADER_LEN 2 diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 2dd0a9ed5b361472fbb1794b3b77f96fc69780ce..733fad7dfbed96c0c55f3d6b2f44ca5cede61a86 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -97,6 +97,11 @@ struct qed_chain_u32 { u32 cons_idx; }; +struct addr_tbl_entry { + void *virt_addr; + dma_addr_t dma_map; +}; + struct qed_chain { /* fastpath portion of the chain - required for commands such * as produce / consume. 
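The percpu_counter_read_positive() change above folds the old barrier() into a READ_ONCE() of fbc->count; with either construct the point is that the sign check and the returned value come from a single load, even though other CPUs keep updating the counter. A small usage sketch (the function name is illustrative):

#include <linux/percpu_counter.h>

/* The caller works with one coherent, if approximate, snapshot. */
static bool demo_counter_over(struct percpu_counter *fbc, s64 limit)
{
	s64 seen = percpu_counter_read_positive(fbc);	/* single read */

	return seen > limit;
}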
@@ -107,10 +112,11 @@ struct qed_chain { /* Fastpath portions of the PBL [if exists] */ struct { - /* Table for keeping the virtual addresses of the chain pages, - * respectively to the physical addresses in the pbl table. + /* Table for keeping the virtual and physical addresses of the + * chain pages, respectively to the physical addresses + * in the pbl table. */ - void **pp_virt_addr_tbl; + struct addr_tbl_entry *pp_addr_tbl; union { struct qed_chain_pbl_u16 u16; @@ -287,7 +293,7 @@ qed_chain_advance_page(struct qed_chain *p_chain, *(u32 *)page_to_inc = 0; page_index = *(u32 *)page_to_inc; } - *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index]; + *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr; } } @@ -537,7 +543,7 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain, p_chain->pbl_sp.p_phys_table = 0; p_chain->pbl_sp.p_virt_table = NULL; - p_chain->pbl.pp_virt_addr_tbl = NULL; + p_chain->pbl.pp_addr_tbl = NULL; } /** @@ -575,11 +581,11 @@ static inline void qed_chain_init_mem(struct qed_chain *p_chain, static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, void *p_virt_pbl, dma_addr_t p_phys_pbl, - void **pp_virt_addr_tbl) + struct addr_tbl_entry *pp_addr_tbl) { p_chain->pbl_sp.p_phys_table = p_phys_pbl; p_chain->pbl_sp.p_virt_table = p_virt_pbl; - p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl; + p_chain->pbl.pp_addr_tbl = pp_addr_tbl; } /** @@ -644,7 +650,7 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) break; case QED_CHAIN_MODE_PBL: last_page_idx = p_chain->page_cnt - 1; - p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx]; + p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr; break; } /* p_virt_addr points at this stage to the last page of the chain */ @@ -716,7 +722,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) page_cnt = qed_chain_get_page_cnt(p_chain); for (i = 0; i < page_cnt; i++) - memset(p_chain->pbl.pp_virt_addr_tbl[i], 0, + memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0, QED_CHAIN_PAGE_SIZE); } diff --git a/include/linux/quota.h b/include/linux/quota.h index 27aab84fcbaac91b857bfc5a34b736d6b9109c1f..3088ac929e56260c443f6a276b2e2e5e5a8b923e 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -316,6 +316,9 @@ struct quota_format_ops { int (*commit_dqblk)(struct dquot *dquot); /* Write structure for one user */ int (*release_dqblk)(struct dquot *dquot); /* Called when last reference to dquot is being dropped */ int (*get_next_id)(struct super_block *sb, struct kqid *qid); /* Get next ID with existing structure in the quota file */ + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; /* Operations working with dquots */ @@ -335,6 +338,9 @@ struct dquot_operations { int (*get_inode_usage) (struct inode *, qsize_t *); /* Get next ID with active quota structure */ int (*get_next_id) (struct super_block *sb, struct kqid *qid); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; struct path; @@ -438,6 +444,9 @@ struct quotactl_ops { int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; struct quota_format_type { diff --git a/include/linux/reboot.h b/include/linux/reboot.h index e63799a6e89515d0893606530b07e101f39c52fb..3734cd8f38a896d55af2ff3a3f3518707444c4fe 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -14,6 +14,7 @@ 
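With the pp_addr_tbl conversion above, each chain page's virtual address and its DMA mapping now sit side by side in struct addr_tbl_entry instead of requiring a separate physical-address lookup. An illustrative walker, assuming a chain already initialised in PBL mode (the function name is made up):

#include <linux/printk.h>
#include <linux/qed/qed_chain.h>

static void demo_chain_dump_pages(struct qed_chain *p_chain)
{
	u32 i;

	for (i = 0; i < qed_chain_get_page_cnt(p_chain); i++) {
		struct addr_tbl_entry *entry = &p_chain->pbl.pp_addr_tbl[i];

		pr_debug("page %u: virt %p dma %pad\n",
			 i, entry->virt_addr, &entry->dma_map);
	}
}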
struct device; #define SYS_POWER_OFF 0x0003 /* Notify of system power off */ enum reboot_mode { + REBOOT_UNDEFINED = -1, REBOOT_COLD = 0, REBOOT_WARM, REBOOT_HARD, @@ -21,6 +22,7 @@ enum reboot_mode { REBOOT_GPIO, }; extern enum reboot_mode reboot_mode; +extern enum reboot_mode panic_reboot_mode; enum reboot_type { BOOT_TRIPLE = 't', diff --git a/include/linux/sched.h b/include/linux/sched.h index e0cdbcb3fc7b2309d1ff5486b44188bb25d2e4e2..1dc5077da58e91ccf3d55c1a4cf79843b06f8ebc 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -26,8 +26,10 @@ #include #include #include +#include #include #include +#include /* task_struct member predeclarations (sorted alphabetically): */ struct audit_context; @@ -485,6 +487,11 @@ struct sched_entity { */ struct sched_avg avg; #endif + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; struct sched_rt_entity { @@ -503,6 +510,11 @@ struct sched_rt_entity { /* rq "owned" by this entity/group: */ struct rt_rq *my_q; #endif + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } __randomize_layout; struct sched_dl_entity { @@ -952,8 +964,8 @@ struct task_struct { struct seccomp seccomp; /* Thread group tracking: */ - u32 parent_exec_id; - u32 self_exec_id; + u64 parent_exec_id; + u64 self_exec_id; /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */ spinlock_t alloc_lock; @@ -971,7 +983,10 @@ struct task_struct { /* Deadlock detection and priority inheritance handling: */ struct rt_mutex_waiter *pi_blocked_on; #endif - +#ifdef CONFIG_MM_EVENT_STAT + struct mm_event_task mm_event[MM_TYPE_NUM]; + unsigned long next_period; +#endif #ifdef CONFIG_DEBUG_MUTEXES /* Mutex deadlock detection: */ struct mutex_waiter *blocked_on; @@ -1278,6 +1293,15 @@ struct task_struct { void *security; #endif + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); + ANDROID_KABI_RESERVE(5); + ANDROID_KABI_RESERVE(6); + ANDROID_KABI_RESERVE(7); + ANDROID_KABI_RESERVE(8); + /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct. diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 660d78c9af6c82992e7b7ea154ca10abc8e4d067..9c456c7b43a2ba70f2360de5e7109d5a5ebb6e55 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -8,6 +8,7 @@ #include #include #include +#include /* * Types defining task->signal and task->sighand and APIs using them: @@ -232,6 +233,10 @@ struct signal_struct { struct mutex cred_guard_mutex; /* guard against foreign influences on * credential calculations * (notably. 
ptrace) */ + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } __randomize_layout; /* diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 22dcac4dbcf5e6e142ffc741332b0597da12e4ee..f0051725ca869b5312daa823273be609fcf48dad 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -3,6 +3,7 @@ #define _LINUX_SCHED_TOPOLOGY_H #include +#include #include @@ -66,6 +67,8 @@ struct sched_domain_shared { atomic_t ref; atomic_t nr_busy_cpus; int has_idle_cores; + + bool overutilized; }; struct sched_domain { @@ -141,6 +144,10 @@ struct sched_domain { struct sched_domain_shared *shared; unsigned int span_weight; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + /* * Span of all CPUs in this domain. * diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 39ad98c09c5808e42a04c5155f0d52aef92cbae8..af25768124844692258fe143dbb545f662ae391e 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -6,6 +6,7 @@ #include #include #include +#include struct key; @@ -46,6 +47,9 @@ struct user_struct { /* Miscellaneous per-user rate limit */ struct ratelimit_state ratelimit; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; extern int uids_sysfs_init(void); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index def3abeb69cdc2692c265981edd2f0c2900f2da3..5fb778155f61a22471225eeab12d3a02c703fd2f 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -249,6 +249,7 @@ struct nf_conntrack { atomic_t use; }; #endif +#include struct nf_bridge_info { refcount_t use; @@ -845,6 +846,9 @@ struct sk_buff { __u32 headers_end[0]; /* public: */ + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + /* These elements must be at the end, see alloc_skb() for details. 
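task_struct above gains a per-task mm_event[] array (under CONFIG_MM_EVENT_STAT) whose struct mm_event_task entries carry an event count, a maximum latency and an accumulated latency. A hedged sketch of reducing one entry to an average latency, assuming accm_lat is a running sum over count samples:

#include <linux/math64.h>
#include <linux/mm_event.h>
#include <linux/sched.h>

#ifdef CONFIG_MM_EVENT_STAT
static u64 demo_mm_event_avg_lat(struct task_struct *tsk,
				 enum mm_event_type type)
{
	struct mm_event_task *stat = &tsk->mm_event[type];

	return stat->count ? div64_u64(stat->accm_lat, stat->count) : 0;
}
#endif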
*/ sk_buff_data_t tail; sk_buff_data_t end; diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index fd78f78df5c662b61430b93858bb5295fec45480..3e3214cca4470f4684c02722526bf0971c44a35e 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -159,6 +159,7 @@ extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma); extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_recv_ctxt *ctxt); extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma); +extern void svc_rdma_release_rqst(struct svc_rqst *rqstp); extern int svc_rdma_recvfrom(struct svc_rqst *); /* svc_rdma_rw.c */ diff --git a/include/linux/swab.h b/include/linux/swab.h index e466fd159c857d6d7acec6da574c8647067e063a..bcff5149861a9eb0dc2fa3b8da4bf8a80e5dc343 100644 --- a/include/linux/swab.h +++ b/include/linux/swab.h @@ -7,6 +7,7 @@ # define swab16 __swab16 # define swab32 __swab32 # define swab64 __swab64 +# define swab __swab # define swahw32 __swahw32 # define swahb32 __swahb32 # define swab16p __swab16p diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 22af9d8a84ae2fe6f11402250b6e6a9367d867d0..28d572b7ea73e901fa114bb96151bae03e2ccca5 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -368,7 +368,8 @@ static inline void num_poisoned_pages_inc(void) } #endif -#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) +#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \ + defined(CONFIG_DEVICE_PRIVATE) static inline int non_swap_entry(swp_entry_t entry) { return swp_type(entry) >= MAX_SWAPFILES; diff --git a/include/linux/thermal.h b/include/linux/thermal.h index b0a5f4830cdbcb208bdb03077fee2e10024d7421..c769773124f294110984e5a0e7ebdb93f36cdd44 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -520,10 +520,10 @@ void thermal_zone_device_update_temp(struct thermal_zone_device *tz, enum thermal_notify_event event, int temp); void thermal_zone_set_trips(struct thermal_zone_device *); -struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, - const struct thermal_cooling_device_ops *); +struct thermal_cooling_device *thermal_cooling_device_register(const char *, + void *, const struct thermal_cooling_device_ops *); struct thermal_cooling_device * -thermal_of_cooling_device_register(struct device_node *np, char *, void *, +thermal_of_cooling_device_register(struct device_node *np, const char *, void *, const struct thermal_cooling_device_ops *); void thermal_cooling_device_unregister(struct thermal_cooling_device *); struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name); diff --git a/include/linux/time.h b/include/linux/time.h index 5f3e499788379acc26a6d2033321e510f56eecff..47ed31fe5bb1d409fd068b3d592ce26b6717a626 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -109,4 +109,7 @@ static inline bool itimerspec64_valid(const struct itimerspec64 *its) * Equivalent to !(time_before32(@t, @l) || time_after32(@t, @h)). 
*/ #define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l)) + +# include + #endif diff --git a/include/linux/time32.h b/include/linux/time32.h index d1ae43c13e25940e2febed51919c171e3018ed3f..00363b4094da61e1695d5d7e2af76826591ba6bf 100644 --- a/include/linux/time32.h +++ b/include/linux/time32.h @@ -11,6 +11,10 @@ #include +#include + +#include + #define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) #if __BITS_PER_LONG == 64 @@ -212,9 +216,6 @@ extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec); * the compat code so it can be shared between 32-bit and 64-bit builds * both of which provide compatibility with old 32-bit tasks. */ -#define old_time32_t compat_time_t -#define old_timeval32 compat_timeval -#define old_timespec32 compat_timespec #define old_itimerspec32 compat_itimerspec #define ns_to_old_timeval32 ns_to_compat_timeval #define get_old_itimerspec32 get_compat_itimerspec64 diff --git a/include/linux/time64.h b/include/linux/time64.h index 4a45aea0f96e92fdc69055d1d41a4c3d0116f7ce..0333e106b5cf3cf63245758003b989408a5c3aea 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -3,6 +3,7 @@ #define _LINUX_TIME64_H #include +#include typedef __s64 time64_t; typedef __u64 timeu64_t; @@ -27,15 +28,6 @@ struct itimerspec64 { struct timespec64 it_value; }; -/* Parameters used to convert the timespec values: */ -#define MSEC_PER_SEC 1000L -#define USEC_PER_MSEC 1000L -#define NSEC_PER_USEC 1000L -#define NSEC_PER_MSEC 1000000L -#define USEC_PER_SEC 1000000L -#define NSEC_PER_SEC 1000000000L -#define FSEC_PER_SEC 1000000000000000LL - /* Located here for timespec[64]_valid_strict */ #define TIME64_MAX ((s64)~((u64)1 << 63)) #define KTIME_MAX ((s64)~((u64)1 << 63)) diff --git a/include/linux/timer.h b/include/linux/timer.h index 7b066fd38248bb48466fa53c0e6462d2af76dc33..611dd9578d1f6acc9da7a91ef6b43bb143e1f0b3 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -7,6 +7,7 @@ #include #include #include +#include struct timer_list { /* @@ -21,6 +22,9 @@ struct timer_list { #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; #ifdef CONFIG_LOCKDEP diff --git a/include/linux/usb.h b/include/linux/usb.h index ba7199cc8ff70225050895c2b339e42a16fad3c2..75d7cc50b47f730b174667ee776253ca3a067d23 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -22,6 +22,7 @@ #include /* for current && schedule_timeout */ #include /* for struct mutex */ #include /* for runtime PM */ +#include struct usb_device; struct usb_driver; @@ -257,6 +258,11 @@ struct usb_interface { struct device dev; /* interface specific device info */ struct device *usb_dev; struct work_struct reset_ws; /* for resets in atomic context */ + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; #define to_usb_interface(d) container_of(d, struct usb_interface, dev) @@ -402,6 +408,13 @@ struct usb_host_bos { struct usb_ssp_cap_descriptor *ssp_cap; struct usb_ss_container_id_descriptor *ss_id; struct usb_ptm_cap_descriptor *ptm_cap; + struct usb_config_summary_descriptor *config_summary; + unsigned int num_config_summary_desc; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; int __usb_get_extra_descriptor(char *buffer, unsigned size, @@ -466,6 +479,20 @@ struct usb_bus { struct mon_bus *mon_bus; /* non-null when associated */ int monitored; /* non-zero when monitored */ 
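time_between32() above is the wrap-safe form of the inclusive window test l <= t <= h: subtracting l from both sides in u32 arithmetic cancels any common wrap-around, which is exactly the !(time_before32() || time_after32()) equivalence stated in the comment. A tiny worked example around the 32-bit wrap point (values chosen only for illustration):

#include <linux/time.h>

/* Window [0xfffffff0, 0x00000010] straddles the u32 wrap-around. */
static bool demo_in_window(u32 t)
{
	u32 lo = 0xfffffff0u;
	u32 hi = 0x00000010u;

	/* true for t == 0xfffffff5 or t == 0x00000008, false for t == 0x20 */
	return time_between32(t, lo, hi);
}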
#endif + unsigned skip_resume:1; /* All USB devices are brought into full + * power state after system resume. It + * is desirable for some buses to keep + * their devices in suspend state even + * after system resume. The devices + * are resumed later when a remote + * wakeup is detected or an interface + * driver starts I/O. + */ + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; struct usb_dev_state; @@ -707,6 +734,11 @@ struct usb_device { unsigned lpm_disable_count; u16 hub_delay; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; #define to_usb_device(d) container_of(d, struct usb_device, dev) @@ -828,6 +860,11 @@ extern int usb_sec_event_ring_setup(struct usb_device *dev, unsigned int intr_num); extern int usb_sec_event_ring_cleanup(struct usb_device *dev, unsigned int intr_num); + +extern phys_addr_t usb_get_sec_event_ring_phys_addr( + struct usb_device *dev, unsigned int intr_num, dma_addr_t *dma); +extern phys_addr_t usb_get_xfer_ring_phys_addr(struct usb_device *dev, + struct usb_host_endpoint *ep, dma_addr_t *dma); extern int usb_get_controller_id(struct usb_device *dev); extern int usb_stop_endpoint(struct usb_device *dev, @@ -1211,6 +1248,11 @@ struct usb_driver { unsigned int supports_autosuspend:1; unsigned int disable_hub_initiated_lpm:1; unsigned int soft_unbind:1; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; #define to_usb_driver(d) container_of(d, struct usb_driver, drvwrap.driver) @@ -1585,6 +1627,10 @@ struct urb { usb_complete_t complete; /* (in) completion routine */ struct usb_iso_packet_descriptor iso_frame_desc[0]; /* (in) ISO ONLY */ + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /* ----------------------------------------------------------------------- */ @@ -1998,8 +2044,11 @@ static inline int usb_translate_errors(int error_code) #define USB_DEVICE_REMOVE 0x0002 #define USB_BUS_ADD 0x0003 #define USB_BUS_REMOVE 0x0004 +#define USB_BUS_DIED 0x0005 extern void usb_register_notify(struct notifier_block *nb); extern void usb_unregister_notify(struct notifier_block *nb); +extern void usb_register_atomic_notify(struct notifier_block *nb); +extern void usb_unregister_atomic_notify(struct notifier_block *nb); /* debugfs stuff */ extern struct dentry *usb_debug_root; diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 9627574e2450b9132bbf856f17fbb4ec51a2ce3b..9a520f42aaac605fec936eb5af30faedafc454f9 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -73,6 +73,7 @@ struct usb_ep; * Note that for writes (IN transfers) some data bytes may still * reside in a device-side FIFO when the request is reported as * complete. + * @udc_priv: Vendor private data in usage by the UDC. * * These are allocated/freed through the endpoint they're used with. The * hardware's driver can add extra per-request data to the memory it returns, @@ -114,6 +115,52 @@ struct usb_request { int status; unsigned actual; + unsigned int udc_priv; +}; + +/* + * @buf_base_addr: Base pointer to buffer allocated for each GSI enabled EP. + * TRBs point to buffers that are split from this pool. The size of the + * buffer is num_bufs times buf_len. num_bufs and buf_len are determined + based on desired performance and aggregation size. + * @dma: DMA address corresponding to buf_base_addr. 
+ * @num_bufs: Number of buffers associated with the GSI enabled EP. This + * corresponds to the number of non-zlp TRBs allocated for the EP. + * The value is determined based on desired performance for the EP. + * @buf_len: Size of each individual buffer is determined based on aggregation + * negotiated as per the protocol. In case of no aggregation supported by + * the protocol, we use default values. + * @db_reg_phs_addr_lsb: IPA channel doorbell register's physical address LSB + * @mapped_db_reg_phs_addr_lsb: doorbell LSB IOVA address mapped with IOMMU + * @db_reg_phs_addr_msb: IPA channel doorbell register's physical address MSB + */ +struct usb_gsi_request { + void *buf_base_addr; + dma_addr_t dma; + size_t num_bufs; + size_t buf_len; + u32 db_reg_phs_addr_lsb; + dma_addr_t mapped_db_reg_phs_addr_lsb; + u32 db_reg_phs_addr_msb; + struct sg_table sgt_trb_xfer_ring; + struct sg_table sgt_data_buff; +}; + +enum gsi_ep_op { + GSI_EP_OP_CONFIG = 0, + GSI_EP_OP_STARTXFER, + GSI_EP_OP_STORE_DBL_INFO, + GSI_EP_OP_ENABLE_GSI, + GSI_EP_OP_UPDATEXFER, + GSI_EP_OP_RING_DB, + GSI_EP_OP_ENDXFER, + GSI_EP_OP_GET_CH_INFO, + GSI_EP_OP_GET_XFER_IDX, + GSI_EP_OP_PREPARE_TRBS, + GSI_EP_OP_FREE_TRBS, + GSI_EP_OP_SET_CLR_BLOCK_DBL, + GSI_EP_OP_CHECK_FOR_SUSPEND, + GSI_EP_OP_DISABLE, }; /*-------------------------------------------------------------------------*/ @@ -144,6 +191,9 @@ struct usb_ep_ops { int (*fifo_status) (struct usb_ep *ep); void (*fifo_flush) (struct usb_ep *ep); + int (*gsi_ep_op) (struct usb_ep *ep, void *op_data, + enum gsi_ep_op op); + }; /** @@ -263,6 +313,8 @@ int usb_ep_clear_halt(struct usb_ep *ep); int usb_ep_set_wedge(struct usb_ep *ep); int usb_ep_fifo_status(struct usb_ep *ep); void usb_ep_fifo_flush(struct usb_ep *ep); +int usb_gsi_ep_op(struct usb_ep *ep, + struct usb_gsi_request *req, enum gsi_ep_op op); #else static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep, unsigned maxpacket_limit) @@ -292,6 +344,10 @@ static inline int usb_ep_fifo_status(struct usb_ep *ep) { return 0; } static inline void usb_ep_fifo_flush(struct usb_ep *ep) { } + +static inline int usb_gsi_ep_op(struct usb_ep *ep, + struct usb_gsi_request *req, enum gsi_ep_op op) +{ return 0; } #endif /* USB_GADGET */ /*-------------------------------------------------------------------------*/ @@ -329,6 +385,7 @@ struct usb_gadget_ops { struct usb_ep *(*match_ep)(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); + int (*restart)(struct usb_gadget *g); }; /** @@ -380,6 +437,8 @@ struct usb_gadget_ops { * @connected: True if gadget is connected. * @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag * indicates that it supports LPM as per the LPM ECN & errata. + * @remote_wakeup: Indicates if the host has enabled the remote_wakeup + * feature. * * Gadgets have a mostly-portable "gadget driver" implementing device * functions, handling all usb configurations and interfaces. 
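struct usb_gsi_request and enum gsi_ep_op above funnel every hardware-accelerated (GSI/IPA) endpoint operation through the single usb_gsi_ep_op() entry point. The sketch below only strings together op codes declared here; the ordering and the function name are illustrative and not the canonical sequence used by the real function driver.

#include <linux/usb/gadget.h>

static int demo_gsi_start(struct usb_ep *ep, struct usb_gsi_request *req)
{
	int ret;

	ret = usb_gsi_ep_op(ep, req, GSI_EP_OP_PREPARE_TRBS);
	if (ret)
		return ret;

	ret = usb_gsi_ep_op(ep, req, GSI_EP_OP_STARTXFER);
	if (ret) {
		usb_gsi_ep_op(ep, req, GSI_EP_OP_FREE_TRBS);
		return ret;
	}

	/* Ring the channel doorbell once the peer side is configured. */
	return usb_gsi_ep_op(ep, req, GSI_EP_OP_RING_DB);
}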
Gadget @@ -434,6 +493,7 @@ struct usb_gadget { unsigned deactivated:1; unsigned connected:1; unsigned lpm_capable:1; + unsigned remote_wakeup:1; }; #define work_to_gadget(w) (container_of((w), struct usb_gadget, work)) @@ -582,7 +642,8 @@ static inline int usb_gadget_frame_number(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_wakeup(struct usb_gadget *gadget) { return 0; } -static int usb_gadget_func_wakeup(struct usb_gadget *gadget, int interface_id) +static inline int usb_gadget_func_wakeup(struct usb_gadget *gadget, + int interface_id) { return 0; } static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget) { return 0; } diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 24b33e9e9b86de8a362b9c9b6964cd4b8615f0b3..7a17449c410c49da869ef7af308af7d7cbb144bc 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -25,6 +25,7 @@ #include #include #include +#include #define MAX_TOPO_LEVEL 6 @@ -217,6 +218,11 @@ struct usb_hcd { * (ohci 32, uhci 1024, ehci 256/512/1024). */ + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); + /* The HC driver's private data is stored at the end of * this structure. */ @@ -410,9 +416,19 @@ struct hc_driver { int (*sec_event_ring_setup)(struct usb_hcd *hcd, unsigned int intr_num); int (*sec_event_ring_cleanup)(struct usb_hcd *hcd, unsigned int intr_num); - int (*get_core_id)(struct usb_hcd *hcd); + phys_addr_t (*get_sec_event_ring_phys_addr)(struct usb_hcd *hcd, + unsigned int intr_num, dma_addr_t *dma); + phys_addr_t (*get_xfer_ring_phys_addr)(struct usb_hcd *hcd, + struct usb_device *udev, struct usb_host_endpoint *ep, + dma_addr_t *dma); + int (*get_core_id)(struct usb_hcd *hcd); int (*stop_endpoint)(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd) @@ -455,6 +471,10 @@ extern int usb_hcd_sec_event_ring_setup(struct usb_device *udev, unsigned int intr_num); extern int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev, unsigned int intr_num); +extern phys_addr_t usb_hcd_get_sec_event_ring_phys_addr( + struct usb_device *udev, unsigned int intr_num, dma_addr_t *dma); +extern phys_addr_t usb_hcd_get_xfer_ring_phys_addr( + struct usb_device *udev, struct usb_host_endpoint *ep, dma_addr_t *dma); extern int usb_hcd_get_controller_id(struct usb_device *udev); extern int usb_hcd_stop_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep); @@ -563,6 +583,11 @@ struct usb_tt { spinlock_t lock; struct list_head clear_list; /* of usb_tt_clear */ struct work_struct clear_work; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; struct usb_tt_clear { diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index e4de6bc1f69b6287cb49882c3235b824bb474d13..b1f7d2e27d222673673bb1575ee5c62be691f680 100644 --- a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h @@ -15,6 +15,17 @@ #include #include +#define ENABLE_DP_MANUAL_PULLUP BIT(0) +#define ENABLE_SECONDARY_PHY BIT(1) +#define PHY_HOST_MODE BIT(2) +#define PHY_CHARGER_CONNECTED BIT(3) +#define PHY_VBUS_VALID_OVERRIDE BIT(4) +#define DEVICE_IN_SS_MODE BIT(5) +#define PHY_LANE_A BIT(6) +#define PHY_LANE_B BIT(7) +#define PHY_HSFS_MODE BIT(8) +#define PHY_LS_MODE BIT(9) + enum usb_phy_interface { 
USBPHY_INTERFACE_MODE_UNKNOWN, USBPHY_INTERFACE_MODE_UTMI, @@ -37,6 +48,8 @@ enum usb_phy_type { USB_PHY_TYPE_UNDEFINED, USB_PHY_TYPE_USB2, USB_PHY_TYPE_USB3, + USB_PHY_TYPE_USB3_OR_DP, + USB_PHY_TYPE_USB3_AND_DP, }; /* OTG defines lots of enumeration states before device reset */ @@ -47,6 +60,7 @@ enum usb_otg_state { OTG_STATE_B_IDLE, OTG_STATE_B_SRP_INIT, OTG_STATE_B_PERIPHERAL, + OTG_STATE_B_SUSPEND, /* extra dual-role default-b states */ OTG_STATE_B_WAIT_ACON, @@ -155,6 +169,10 @@ struct usb_phy { * manually detect the charger type. */ enum usb_charger_type (*charger_detect)(struct usb_phy *x); + + /* reset the PHY clocks */ + int (*reset)(struct usb_phy *x); + int (*drive_dp_pulse)(struct usb_phy *x, unsigned int pulse_width); }; /* for board-specific init logic */ @@ -213,6 +231,24 @@ usb_phy_vbus_off(struct usb_phy *x) return x->set_vbus(x, false); } +static inline int +usb_phy_reset(struct usb_phy *x) +{ + if (x && x->reset) + return x->reset(x); + + return 0; +} + +static inline int +usb_phy_drive_dp_pulse(struct usb_phy *x, unsigned int pulse_width) +{ + if (x && x->drive_dp_pulse) + return x->drive_dp_pulse(x, pulse_width); + + return 0; +} + /* for usb host and peripheral controller drivers */ #if IS_ENABLED(CONFIG_USB_PHY) extern struct usb_phy *usb_get_phy(enum usb_phy_type type); diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index e2ec3582e54937d3818afed3e253440fc23541a0..e39bf69bbc9c87c4a01d18599e290a85ffff7a9d 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -23,6 +23,8 @@ #ifndef __LINUX_USB_USBNET_H #define __LINUX_USB_USBNET_H +#include + /* interface from usbnet core to each USB networking link we handle */ struct usbnet { /* housekeeping */ @@ -83,6 +85,11 @@ struct usbnet { # define EVENT_LINK_CHANGE 11 # define EVENT_SET_RX_MODE 12 # define EVENT_NO_IP_ALIGN 13 + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; static inline struct usb_driver *driver_of(struct usb_interface *intf) @@ -172,6 +179,9 @@ struct driver_info { int out; /* tx endpoint */ unsigned long data; /* Misc driver specific data */ + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; /* Minidrivers are just drivers using the "usbnet" core as a powerful diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index d6b74b91096b4db0f1fdca10990f9f21bb42d690..c1de1158bac0eeaa4894c491df1f792a94585cee 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -10,6 +10,7 @@ #include #include #include +#include #define UID_GID_MAP_MAX_BASE_EXTENTS 5 #define UID_GID_MAP_MAX_EXTENTS 340 @@ -76,6 +77,9 @@ struct user_namespace { #endif struct ucounts *ucounts; int ucount_max[UCOUNT_COUNTS]; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); } __randomize_layout; struct ucounts { diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index e0348cb0a1dd7d2f3320e58a7ec1cc76e4a8799b..f36727098df860308f11095a4fef43e9d7d7894c 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -3,6 +3,8 @@ #define _LINUX_VIRTIO_NET_H #include +#include +#include #include static inline int virtio_net_hdr_set_proto(struct sk_buff *skb, @@ -28,17 +30,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, bool little_endian) { unsigned int gso_type = 0; + unsigned int thlen = 0; + unsigned int ip_proto; if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case 
VIRTIO_NET_HDR_GSO_TCPV4: gso_type = SKB_GSO_TCPV4; + ip_proto = IPPROTO_TCP; + thlen = sizeof(struct tcphdr); break; case VIRTIO_NET_HDR_GSO_TCPV6: gso_type = SKB_GSO_TCPV6; + ip_proto = IPPROTO_TCP; + thlen = sizeof(struct tcphdr); break; case VIRTIO_NET_HDR_GSO_UDP: gso_type = SKB_GSO_UDP; + ip_proto = IPPROTO_UDP; + thlen = sizeof(struct udphdr); break; default: return -EINVAL; @@ -57,16 +67,22 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, if (!skb_partial_csum_set(skb, start, off)) return -EINVAL; + + if (skb_transport_offset(skb) + thlen > skb_headlen(skb)) + return -EINVAL; } else { /* gso packets without NEEDS_CSUM do not set transport_offset. * probe and drop if does not match one of the above types. */ if (gso_type && skb->network_header) { + struct flow_keys_basic keys; + if (!skb->protocol) virtio_net_hdr_set_proto(skb, hdr); retry: - skb_probe_transport_header(skb, -1); - if (!skb_transport_header_was_set(skb)) { + if (!skb_flow_dissect_flow_keys_basic(skb, &keys, + NULL, 0, 0, 0, + 0)) { /* UFO does not specify ipv4 or 6: try both */ if (gso_type & SKB_GSO_UDP && skb->protocol == htons(ETH_P_IP)) { @@ -75,6 +91,12 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, } return -EINVAL; } + + if (keys.control.thoff + thlen > skb_headlen(skb) || + keys.basic.ip_proto != ip_proto) + return -EINVAL; + + skb_set_transport_header(skb, keys.control.thoff); } } diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 44050689e68b8b5a47364196c99315d44307254c..0e5b5ee47fef1227b8a934f55eef8225d0013e68 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -105,7 +105,7 @@ extern void vunmap(const void *addr); extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, void *kaddr, - unsigned long size); + unsigned long pgoff, unsigned long size); extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff); diff --git a/include/linux/wait.h b/include/linux/wait.h index ed7c122cb31f407f70a36a198d5e6d9ae48c01fa..62aa6e0b3149c986d79febcfc624cd4d2070e435 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -1111,9 +1112,12 @@ void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *w void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); -long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout); -int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); -int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); +long __sched wait_woken(struct wait_queue_entry *wq_entry, unsigned int mode, + long timeout); +int __sched woken_wake_function(struct wait_queue_entry *wq_entry, + unsigned int mode, int sync, void *key); +int __sched autoremove_wake_function(struct wait_queue_entry *wq_entry, + unsigned int mode, int sync, void *key); #define DEFINE_WAIT_FUNC(name, function) \ struct wait_queue_entry name = { \ diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 60d673e15632126a038b1efe12108abe122fc235..342f374792280506175a5fc6a2d67009eca743e3 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ 
-14,6 +14,7 @@ #include #include #include +#include struct workqueue_struct; @@ -106,6 +107,8 @@ struct work_struct { #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; #define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) @@ -119,6 +122,9 @@ struct delayed_work { /* target workqueue and CPU ->timer uses to queue ->work */ struct workqueue_struct *wq; int cpu; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; struct rcu_work { diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index bddd86c11f5f5bce47da89938757c0ddcd717f91..278613ab90590a564345463abe0b226bde886928 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -18,7 +18,7 @@ #include #include -#define VB2_MAX_FRAME (32) +#define VB2_MAX_FRAME (64) #define VB2_MAX_PLANES (8) /** diff --git a/include/net/addrconf.h b/include/net/addrconf.h index d803aa7c22d491a4f67706ffb1f6f0eb74814b90..710900d72562bd59dea902416a52cc1d95b0ba2b 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -235,8 +235,10 @@ struct ipv6_stub { const struct in6_addr *addr); int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex, const struct in6_addr *addr); - int (*ipv6_dst_lookup)(struct net *net, struct sock *sk, - struct dst_entry **dst, struct flowi6 *fl6); + struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net, + const struct sock *sk, + struct flowi6 *fl6, + const struct in6_addr *final_dst); struct fib6_table *(*fib6_get_table)(struct net *net, u32 id); struct fib6_info *(*fib6_lookup)(struct net *net, int oif, diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index bcbeea774ee73e07872ef615ad8a2f1a9936d7e3..019d45332217b4278cd5ec720de32f31417e9aa4 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -6,7 +6,7 @@ * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018-2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -25,6 +25,9 @@ #include #include +/* Indicate support for including KEK length in rekey data */ +#define CFG80211_REKEY_DATA_KEK_LEN 1 + /** * DOC: Introduction * @@ -1381,6 +1384,10 @@ struct cfg80211_tid_stats { * @ack_signal: signal strength (in dBm) of the last ACK frame. * @avg_ack_signal: average rssi value of ack packet for the no of msdu's has * been sent. + * @rx_mpdu_count: number of MPDUs received from this station + * @fcs_err_count: number of packets (MPDUs) received from this station with + * an FCS error. This counter should be incremented only when TA of the + * received packet with an FCS error matches the peer MAC address. */ struct station_info { u64 filled; @@ -1427,6 +1434,9 @@ struct station_info { struct cfg80211_tid_stats *pertid; s8 ack_signal; s8 avg_ack_signal; + + u32 rx_mpdu_count; + u32 fcs_err_count; }; #if IS_ENABLED(CONFIG_CFG80211) @@ -2071,6 +2081,8 @@ struct cfg80211_bss_ies { * @signal: signal strength value (type depends on the wiphy's signal_type) * @chains: bitmask for filled values in @chain_signal. * @chain_signal: per-chain signal strength of last received BSS in dBm. 
+ * @bssid_index: index in the multiple BSS set + * @max_bssid_indicator: max number of members in the BSS set * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes */ struct cfg80211_bss { @@ -2082,6 +2094,8 @@ struct cfg80211_bss { const struct cfg80211_bss_ies __rcu *proberesp_ies; struct cfg80211_bss *hidden_beacon_bss; + struct cfg80211_bss *transmitted_bss; + struct list_head nontrans_list; s32 signal; @@ -2092,6 +2106,9 @@ struct cfg80211_bss { u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; + u8 bssid_index; + u8 max_bssid_indicator; + u8 priv[0] __aligned(sizeof(void *)); }; @@ -2657,12 +2674,14 @@ struct cfg80211_wowlan_wakeup { /** * struct cfg80211_gtk_rekey_data - rekey data - * @kek: key encryption key (NL80211_KEK_LEN bytes) + * @kek: key encryption key * @kck: key confirmation key (NL80211_KCK_LEN bytes) * @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes) + * @kek_len: Length of @kek in octets */ struct cfg80211_gtk_rekey_data { const u8 *kek, *kck, *replay_ctr; + size_t kek_len; }; /** @@ -2891,6 +2910,32 @@ struct cfg80211_external_auth_params { const u8 *pmkid; }; +/** + * struct cfg80211_update_owe_info - OWE Information + * + * This structure provides information needed for the drivers to offload OWE + * (Opportunistic Wireless Encryption) processing to the user space. + * + * Commonly used across update_owe_info request and event interfaces. + * + * @peer: MAC address of the peer device for which the OWE processing + * has to be done. + * @status: status code, %WLAN_STATUS_SUCCESS for successful OWE info + * processing, use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space + * cannot give you the real status code for failures. Used only for + * OWE update request command interface (user space to driver). + * @ie: IEs obtained from the peer or constructed by the user space. These are + * the IEs of the remote peer in the event from the host driver and + * the constructed IEs by the user space in the request interface. + * @ie_len: Length of IEs in octets. + */ +struct cfg80211_update_owe_info { + u8 peer[ETH_ALEN] __aligned(2); + u16 status; + const u8 *ie; + size_t ie_len; +}; + /** * struct cfg80211_ops - backend description for wireless configuration * @@ -2938,6 +2983,8 @@ struct cfg80211_external_auth_params { * @set_default_key: set the default key on an interface * * @set_default_mgmt_key: set the default management frame key on an interface + + * @set_default_beacon_key: set the default Beacon frame key on an interface * * @set_rekey_data: give the data necessary for GTK rekeying to the driver * @@ -3226,6 +3273,10 @@ struct cfg80211_external_auth_params { * * @tx_control_port: TX a control port frame (EAPoL). The noencrypt parameter * tells the driver that the frame should not be encrypted. + * + * @update_owe_info: Provide updated OWE info to driver. Driver implementing SME + * but offloading OWE processing to the user space will get the updated + * DH IE through this interface. 
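For the OWE offload path described above, a driver fills struct cfg80211_update_owe_info with the peer address and the Diffie-Hellman IE it received and reports it with cfg80211_update_owe_info_event(), declared further down in this header. A hedged sketch; the function name and the buffers handed in are placeholders for whatever the firmware delivers:

#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>

static void demo_report_owe_peer(struct net_device *netdev, const u8 *peer,
				 const u8 *dh_ie, size_t dh_ie_len)
{
	struct cfg80211_update_owe_info owe_info = {
		.status	= WLAN_STATUS_SUCCESS,
		.ie	= dh_ie,
		.ie_len	= dh_ie_len,
	};

	ether_addr_copy(owe_info.peer, peer);
	cfg80211_update_owe_info_event(netdev, &owe_info, GFP_KERNEL);
}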
*/ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -3259,6 +3310,9 @@ struct cfg80211_ops { int (*set_default_mgmt_key)(struct wiphy *wiphy, struct net_device *netdev, u8 key_index); + int (*set_default_beacon_key)(struct wiphy *wiphy, + struct net_device *netdev, + u8 key_index); int (*start_ap)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_settings *settings); @@ -3533,6 +3587,8 @@ struct cfg80211_ops { const u8 *buf, size_t len, const u8 *dest, const __be16 proto, const bool noencrypt); + int (*update_owe_info)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_update_owe_info *owe_info); }; /* @@ -3905,6 +3961,21 @@ struct wiphy_iftype_ext_capab { u8 extended_capabilities_len; }; +/** + * struct wiphy_iftype_akm_suites - This structure encapsulates supported akm + * suites for interface types defined in @iftypes_mask. Each type in the + * @iftypes_mask must be unique across all instances of iftype_akm_suites. + * + * @iftypes_mask: bitmask of interfaces types + * @akm_suites: points to an array of supported akm suites + * @n_akm_suites: number of supported AKM suites + */ +struct wiphy_iftype_akm_suites { + u16 iftypes_mask; + const u32 *akm_suites; + int n_akm_suites; +}; + /** * struct wiphy - wireless hardware description * @reg_notifier: the driver's regulatory notification callback, @@ -3917,6 +3988,12 @@ struct wiphy_iftype_ext_capab { * @signal_type: signal type reported in &struct cfg80211_bss. * @cipher_suites: supported cipher suites * @n_cipher_suites: number of supported cipher suites + * @iftype_akm_suites: array of supported akm suites info per interface type. + * Note that the bits in @iftypes_mask inside this structure cannot + * overlap (i.e. only one occurrence of each type is allowed across all + * instances of iftype_akm_suites). + * @num_iftype_akm_suites: number of interface types for which supported akm + * suites are specified separately. * @retry_short: Retry limit for short frames (dot11ShortRetryLimit) * @retry_long: Retry limit for long frames (dot11LongRetryLimit) * @frag_threshold: Fragmentation threshold (dot11FragmentationThreshold); @@ -4070,6 +4147,11 @@ struct wiphy_iftype_ext_capab { * @txq_limit: configuration of internal TX queue frame limit * @txq_memory_limit: configuration internal TX queue memory limit * @txq_quantum: configuration of internal TX queue scheduler quantum + * + * @support_mbssid: can HW support association with nontransmitted AP + * @support_only_he_mbssid: don't parse MBSSID elements if it is not + * HE AP, in order to avoid compatibility issues. + * @support_mbssid must be set for this to have any effect. 
*/ struct wiphy { /* assign these fields before you register the wiphy */ @@ -4114,6 +4196,9 @@ struct wiphy { int n_cipher_suites; const u32 *cipher_suites; + const struct wiphy_iftype_akm_suites *iftype_akm_suites; + unsigned int num_iftype_akm_suites; + u8 retry_short; u8 retry_long; u32 frag_threshold; @@ -4208,6 +4293,9 @@ struct wiphy { u32 txq_memory_limit; u32 txq_quantum; + u8 support_mbssid:1, + support_only_he_mbssid:1; + char priv[0] __aligned(NETDEV_ALIGN); }; @@ -5078,6 +5166,27 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp); } +/** + * cfg80211_gen_new_bssid - generate a nontransmitted BSSID for multi-BSSID + * @bssid: transmitter BSSID + * @max_bssid: max BSSID indicator, taken from Multiple BSSID element + * @mbssid_index: BSSID index, taken from Multiple BSSID index element + * @new_bssid: calculated nontransmitted BSSID + */ +static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid, + u8 mbssid_index, u8 *new_bssid) +{ + u64 bssid_u64 = ether_addr_to_u64(bssid); + u64 mask = GENMASK_ULL(max_bssid - 1, 0); + u64 new_bssid_u64; + + new_bssid_u64 = bssid_u64 & ~mask; + + new_bssid_u64 |= ((bssid_u64 & mask) + mbssid_index) & mask; + + u64_to_ether_addr(new_bssid_u64, new_bssid); +} + /** * enum cfg80211_bss_frame_type - frame type that the BSS data came from * @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is @@ -6743,4 +6852,14 @@ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype, #define wiphy_WARN(wiphy, format, args...) \ WARN(1, "wiphy: %s\n" format, wiphy_name(wiphy), ##args); +/** + * cfg80211_update_owe_info_event - Notify the peer's OWE info to user space + * @netdev: network device + * @owe_info: peer's owe info + * @gfp: allocation flags + */ +void cfg80211_update_owe_info_event(struct net_device *netdev, + struct cfg80211_update_owe_info *owe_info, + gfp_t gfp); + #endif /* __NET_CFG80211_H */ diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 7b9c82de11cc9388b070992af610e5fd14b66333..5e26d61867b21d1fed237c5e23b25a1feedb6123 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -234,6 +234,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst, return rt->rt6i_flags & RTF_ANYCAST || (rt->rt6i_dst.plen < 127 && + !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) && ipv6_addr_equal(&rt->rt6i_dst.addr, daddr)); } diff --git a/include/net/ipv6.h b/include/net/ipv6.h index ff33f498c1373c86886ea579532412fa4eb0285d..4c2e40882e8845757da7ca7fc60f4b638cc41d3f 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -959,7 +959,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk) int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); -struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, +struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst); struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst, diff --git a/include/net/sock.h b/include/net/sock.h index f359e5c947628d23d32bb37d1beb40d67b408a6a..0d6676062bfaccbd23b554af5cd73795da37f825 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -73,6 +73,7 @@ #include #include #include +#include /* * This structure really needs to be cleaned up. 
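
The cfg80211_gen_new_bssid() helper added to cfg80211.h above derives a nontransmitted BSSID by adding the BSSID index to the low max_bssid bits of the transmitted BSSID, modulo 2^max_bssid, while leaving the upper bits untouched. The stand-alone sketch below re-implements that arithmetic in plain C purely as an illustration of the math; addr_to_u64() and u64_to_addr() are local stand-ins for the kernel's ether_addr_to_u64()/u64_to_ether_addr() and are not kernel APIs.

/* illustration only: mirrors the cfg80211_gen_new_bssid() arithmetic */
#include <stdint.h>
#include <stdio.h>

static uint64_t addr_to_u64(const uint8_t a[6])
{
        uint64_t v = 0;

        for (int i = 0; i < 6; i++)     /* a[0] is the most significant octet */
                v = (v << 8) | a[i];
        return v;
}

static void u64_to_addr(uint64_t v, uint8_t a[6])
{
        for (int i = 5; i >= 0; i--, v >>= 8)
                a[i] = v & 0xff;
}

int main(void)
{
        /* transmitted BSSID, MaxBSSID indicator n = 4, BSSID index = 5 */
        const uint8_t bssid[6] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x50 };
        uint8_t new_bssid[6];
        unsigned int max_bssid = 4, mbssid_index = 5;
        uint64_t b = addr_to_u64(bssid);
        uint64_t mask = (1ULL << max_bssid) - 1; /* GENMASK_ULL(max_bssid - 1, 0) */
        uint64_t n = (b & ~mask) | (((b & mask) + mbssid_index) & mask);

        u64_to_addr(n, new_bssid);
        /* prints 00:03:7f:12:34:55 - only the low max_bssid bits change */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               new_bssid[0], new_bssid[1], new_bssid[2],
               new_bssid[3], new_bssid[4], new_bssid[5]);
        return 0;
}

This is also why @bssid_index and @max_bssid_indicator are carried in struct cfg80211_bss above: together with the transmitted BSSID they are enough to reconstruct the address of any nontransmitted BSS in the set.
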
@@ -509,6 +510,15 @@ struct sock { void (*sk_destruct)(struct sock *sk); struct sock_reuseport __rcu *sk_reuseport_cb; struct rcu_head sk_rcu; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); + ANDROID_KABI_RESERVE(5); + ANDROID_KABI_RESERVE(6); + ANDROID_KABI_RESERVE(7); + ANDROID_KABI_RESERVE(8); }; enum sk_pacing { diff --git a/include/net/tcp.h b/include/net/tcp.h index 918bfd0d7d1f9e03e878e47c9b127bc677fb9ac8..e43df898d3db28ee8cdc58deefbac9d63bf0e85a 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -53,7 +53,7 @@ extern struct inet_hashinfo tcp_hashinfo; extern struct percpu_counter tcp_orphan_count; void tcp_time_wait(struct sock *sk, int state, int timeo); -#define MAX_TCP_HEADER (128 + MAX_HEADER) +#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER) #define MAX_TCP_OPTION_SPACE 40 #define TCP_MIN_SND_MSS 48 #define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE) diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index c891ada3c5c25c1b6f91a33af08765dfabb065b5..16156614fdf77a2b1d6b0b90c85d492095492ecf 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -11,6 +11,7 @@ #include #include #include +#include struct Scsi_Host; struct scsi_driver; @@ -147,6 +148,11 @@ struct scsi_cmnd { int flags; /* Command flags */ unsigned char tag; /* SCSI-II queued command tag */ + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /* diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 039e289f295e1a222fb938eba2e5963658c005f0..64cbee49fadaf784289e57722da8028005b50c20 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -8,6 +8,7 @@ #include #include #include +#include struct device; struct request_queue; @@ -201,6 +202,9 @@ struct scsi_device { unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */ unsigned rpm_autosuspend:1; /* Enable runtime autosuspend at device * creation time */ + /* If non-zero, use timeout (in jiffies) for all commands */ + unsigned int timeout_override; + atomic_t disk_events_disable_depth; /* disable depth for disk events */ DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */ @@ -229,6 +233,11 @@ struct scsi_device { enum scsi_device_state sdev_state; struct task_struct *quiesced_by; unsigned long sdev_data[0]; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); } __attribute__((aligned(sizeof(unsigned long)))); #define to_scsi_device(d) \ @@ -456,6 +465,8 @@ extern void sdev_disable_disk_events(struct scsi_device *sdev); extern void sdev_enable_disk_events(struct scsi_device *sdev); extern int scsi_vpd_lun_id(struct scsi_device *, char *, size_t); extern int scsi_vpd_tpg_id(struct scsi_device *, int *); +extern void scsi_set_cmd_timeout_override(struct scsi_device *sdev, + unsigned int timeout); #ifdef CONFIG_PM extern int scsi_autopm_get_device(struct scsi_device *); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 1462a3c1065bca3f28ffbd1f2a40e11db9f7a983..0c750a8666d363e736a1f783dfe4a90d0e25c2ec 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -10,6 +10,7 @@ #include #include #include +#include struct request_queue; struct block_device; @@ -487,6 +488,11 @@ struct scsi_host_template { /* Delay for runtime autosuspend */ int rpm_autosuspend_delay; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + 
ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /* @@ -657,6 +663,12 @@ struct Scsi_Host { /* Host responded with short (<36 bytes) INQUIRY result */ unsigned short_inquiry:1; + /* + * Set "DBD" field in mode_sense caching mode page in case it is + * mandatory by LLD standard. + */ + unsigned set_dbd_for_caching:1; + /* * Optional work queue to be utilized by the transport */ diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index f29c60bc14018adaa7f4a3d1fc369f6a714107d0..325c72ad8fda9b50b0b804bd3299b1b2d2fedbde 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h @@ -84,6 +84,8 @@ struct snd_compr_stream { * @get_params: retrieve the codec parameters, mandatory * @set_metadata: Set the metadata values for a stream * @get_metadata: retrieves the requested metadata values from stream + * @set_next_track_param: send codec specific data of subsequent track + * in gapless * @trigger: Trigger operations like start, pause, resume, drain, stop. * This callback is mandatory * @pointer: Retrieve current h/w pointer information. Mandatory @@ -106,6 +108,8 @@ struct snd_compr_ops { struct snd_compr_metadata *metadata); int (*get_metadata)(struct snd_compr_stream *stream, struct snd_compr_metadata *metadata); + int (*set_next_track_param)(struct snd_compr_stream *stream, + union snd_codec_options *codec_options); int (*trigger)(struct snd_compr_stream *stream, int cmd); int (*pointer)(struct snd_compr_stream *stream, struct snd_compr_tstamp *tstamp); diff --git a/include/sound/jack.h b/include/sound/jack.h index 1e84bfb553cf75017f9485a355b311460320235e..722a20e05f089c41a309ae5c5a0a9e59ebec4a41 100644 --- a/include/sound/jack.h +++ b/include/sound/jack.h @@ -58,19 +58,22 @@ enum snd_jack_types { SND_JACK_VIDEOOUT = 0x0010, SND_JACK_AVOUT = SND_JACK_LINEOUT | SND_JACK_VIDEOOUT, SND_JACK_LINEIN = 0x0020, + SND_JACK_OC_HPHL = 0x0040, + SND_JACK_OC_HPHR = 0x0080, + SND_JACK_UNSUPPORTED = 0x0100, + SND_JACK_MICROPHONE2 = 0x0200, + SND_JACK_ANC_HEADPHONE = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE | + SND_JACK_MICROPHONE2, /* Kept separate from switches to facilitate implementation */ - SND_JACK_BTN_0 = 0x4000, - SND_JACK_BTN_1 = 0x2000, - SND_JACK_BTN_2 = 0x1000, - SND_JACK_BTN_3 = 0x0800, - SND_JACK_BTN_4 = 0x0400, - SND_JACK_BTN_5 = 0x0200, + SND_JACK_BTN_0 = 0x8000, + SND_JACK_BTN_1 = 0x4000, + SND_JACK_BTN_2 = 0x2000, + SND_JACK_BTN_3 = 0x1000, + SND_JACK_BTN_4 = 0x0800, + SND_JACK_BTN_5 = 0x0400, }; -/* Keep in sync with definitions above */ -#define SND_JACK_SWITCH_TYPES 6 - struct snd_jack { struct list_head kctl_list; struct snd_card *card; diff --git a/include/sound/pcm.h b/include/sound/pcm.h index ead7dea4bc2b6803e52127c6ca935dd582ea1f6c..a94e06281eaa165e8d755209fb133f1dce5466ca 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -476,6 +476,7 @@ struct snd_pcm_substream { const struct snd_pcm_ops *ops; /* -- runtime information -- */ struct snd_pcm_runtime *runtime; + spinlock_t runtime_lock; /* -- timer section -- */ struct snd_timer *timer; /* timer */ unsigned timer_running: 1; /* time is running */ diff --git a/include/sound/soc.h b/include/sound/soc.h index 6a03bda1ffb9af594a321cef19f94670b813cd81..65ca725d75799dde07797b317ba188fb6243a774 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -1067,6 +1067,7 @@ struct snd_soc_card { struct mutex mutex; struct mutex dapm_mutex; + struct mutex dapm_power_mutex; bool instantiated; bool topology_shortname_created; diff --git 
a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index f2e6abea84905abdd4686fd5f277b09af2abcadd..70d18444d18d81d87ce2f18179075b147377b07d 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -674,7 +674,7 @@ struct iscsi_session { atomic_t session_logout; atomic_t session_reinstatement; atomic_t session_stop_active; - atomic_t sleep_on_sess_wait_comp; + atomic_t session_close; /* connection list */ struct list_head sess_conn_list; struct list_head cr_active_list; diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index ada9d991d283a0bd1cb0f40a0495631e8e72307d..7daa8e5f6186fd7318bcec156b72e7d41a566a13 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -1822,6 +1822,77 @@ DEFINE_EVENT(f2fs_zip_end, f2fs_decompress_pages_end, TP_ARGS(inode, cluster_idx, compressed_size, ret) ); +TRACE_EVENT(f2fs_iostat, + + TP_PROTO(struct f2fs_sb_info *sbi, unsigned long long *iostat), + + TP_ARGS(sbi, iostat), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned long long, app_dio) + __field(unsigned long long, app_bio) + __field(unsigned long long, app_wio) + __field(unsigned long long, app_mio) + __field(unsigned long long, fs_dio) + __field(unsigned long long, fs_nio) + __field(unsigned long long, fs_mio) + __field(unsigned long long, fs_gc_dio) + __field(unsigned long long, fs_gc_nio) + __field(unsigned long long, fs_cp_dio) + __field(unsigned long long, fs_cp_nio) + __field(unsigned long long, fs_cp_mio) + __field(unsigned long long, app_drio) + __field(unsigned long long, app_brio) + __field(unsigned long long, app_rio) + __field(unsigned long long, app_mrio) + __field(unsigned long long, fs_drio) + __field(unsigned long long, fs_nrio) + __field(unsigned long long, fs_mrio) + __field(unsigned long long, fs_discard) + ), + + TP_fast_assign( + __entry->dev = sbi->sb->s_dev; + __entry->app_dio = iostat[APP_DIRECT_IO]; + __entry->app_bio = iostat[APP_BUFFERED_IO]; + __entry->app_wio = iostat[APP_WRITE_IO]; + __entry->app_mio = iostat[APP_MAPPED_IO]; + __entry->fs_dio = iostat[FS_DATA_IO]; + __entry->fs_nio = iostat[FS_NODE_IO]; + __entry->fs_mio = iostat[FS_META_IO]; + __entry->fs_gc_dio = iostat[FS_GC_DATA_IO]; + __entry->fs_gc_nio = iostat[FS_GC_NODE_IO]; + __entry->fs_cp_dio = iostat[FS_CP_DATA_IO]; + __entry->fs_cp_nio = iostat[FS_CP_NODE_IO]; + __entry->fs_cp_mio = iostat[FS_CP_META_IO]; + __entry->app_drio = iostat[APP_DIRECT_READ_IO]; + __entry->app_brio = iostat[APP_BUFFERED_READ_IO]; + __entry->app_rio = iostat[APP_READ_IO]; + __entry->app_mrio = iostat[APP_MAPPED_READ_IO]; + __entry->fs_drio = iostat[FS_DATA_READ_IO]; + __entry->fs_nrio = iostat[FS_NODE_READ_IO]; + __entry->fs_mrio = iostat[FS_META_READ_IO]; + __entry->fs_discard = iostat[FS_DISCARD]; + ), + + TP_printk("dev = (%d,%d), " + "app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu], " + "fs [data=%llu, node=%llu, meta=%llu, discard=%llu], " + "gc [data=%llu, node=%llu], " + "cp [data=%llu, node=%llu, meta=%llu], " + "app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], " + "fs [data=%llu, node=%llu, meta=%llu]", + show_dev(__entry->dev), __entry->app_wio, __entry->app_dio, + __entry->app_bio, __entry->app_mio, __entry->fs_dio, + __entry->fs_nio, __entry->fs_mio, __entry->fs_discard, + __entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio, + __entry->fs_cp_nio, __entry->fs_cp_mio, + __entry->app_rio, __entry->app_drio, __entry->app_brio, + __entry->app_mrio, __entry->fs_drio, 
__entry->fs_nrio, + __entry->fs_mrio) +); + #endif /* _TRACE_F2FS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/gpu_mem.h b/include/trace/events/gpu_mem.h new file mode 100644 index 0000000000000000000000000000000000000000..1897822a9150659127631d4220702bd639beef5f --- /dev/null +++ b/include/trace/events/gpu_mem.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * GPU memory trace points + * + * Copyright (C) 2020 Google, Inc. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM gpu_mem + +#if !defined(_TRACE_GPU_MEM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_GPU_MEM_H + +#include + +/* + * The gpu_memory_total event indicates that there's an update to either the + * global or process total gpu memory counters. + * + * This event should be emitted whenever the kernel device driver allocates, + * frees, imports, unimports memory in the GPU addressable space. + * + * @gpu_id: This is the gpu id. + * + * @pid: Put 0 for global total, while positive pid for process total. + * + * @size: Virtual size of the allocation in bytes. + * + */ +TRACE_EVENT(gpu_mem_total, + + TP_PROTO(uint32_t gpu_id, uint32_t pid, uint64_t size), + + TP_ARGS(gpu_id, pid, size), + + TP_STRUCT__entry( + __field(uint32_t, gpu_id) + __field(uint32_t, pid) + __field(uint64_t, size) + ), + + TP_fast_assign( + __entry->gpu_id = gpu_id; + __entry->pid = pid; + __entry->size = size; + ), + + TP_printk("gpu_id=%u pid=%u size=%llu", + __entry->gpu_id, + __entry->pid, + __entry->size) +); + +#endif /* _TRACE_GPU_MEM_H */ + +/* This part must be outside protection */ +#include diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h index 270875a9dfaa51c25ecc95f612c8138a9902f4aa..0db6f7f86270ddb47859d99b42936014920edaba 100644 --- a/include/trace/events/iommu.h +++ b/include/trace/events/iommu.h @@ -12,8 +12,10 @@ #define _TRACE_IOMMU_H #include +#include struct device; +struct iommu_domain; DECLARE_EVENT_CLASS(iommu_group_event, @@ -85,47 +87,84 @@ DEFINE_EVENT(iommu_device_event, detach_device_from_domain, TRACE_EVENT(map, - TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), + TP_PROTO(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot), - TP_ARGS(iova, paddr, size), + TP_ARGS(domain, iova, paddr, size, prot), TP_STRUCT__entry( + __string(name, domain->name) __field(u64, iova) __field(u64, paddr) __field(size_t, size) + __field(int, prot) ), TP_fast_assign( + __assign_str(name, domain->name); __entry->iova = iova; __entry->paddr = paddr; __entry->size = size; + __entry->prot = prot; ), - TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu", - __entry->iova, __entry->paddr, __entry->size + TP_printk("IOMMU:%s iova=0x%016llx paddr=0x%016llx size=0x%zx prot=0x%x", + __get_str(name), __entry->iova, __entry->paddr, + __entry->size, __entry->prot ) ); TRACE_EVENT(unmap, - TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size), + TP_PROTO(struct iommu_domain *domain, unsigned long iova, size_t size, + size_t unmapped_size), - TP_ARGS(iova, size, unmapped_size), + TP_ARGS(domain, iova, size, unmapped_size), TP_STRUCT__entry( + __string(name, domain->name) __field(u64, iova) __field(size_t, size) __field(size_t, unmapped_size) ), TP_fast_assign( + __assign_str(name, domain->name); __entry->iova = iova; __entry->size = size; __entry->unmapped_size = unmapped_size; ), - TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu", - __entry->iova, __entry->size, __entry->unmapped_size + 
TP_printk("IOMMU:%s iova=0x%016llx size=0x%zx unmapped_size=0x%zx", + __get_str(name), __entry->iova, __entry->size, + __entry->unmapped_size + ) +); + +TRACE_EVENT(map_sg, + + TP_PROTO(struct iommu_domain *domain, unsigned long iova, size_t size, + int prot), + + TP_ARGS(domain, iova, size, prot), + + TP_STRUCT__entry( + __string(name, domain->name) + __field(u64, iova) + __field(size_t, size) + __field(int, prot) + ), + + TP_fast_assign( + __assign_str(name, domain->name); + __entry->iova = iova; + __entry->size = size; + __entry->prot = prot; + ), + + TP_printk("IOMMU:%s iova=0x%016llx size=0x%zx prot=0x%x", + __get_str(name), __entry->iova, __entry->size, + __entry->prot ) ); diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 4c91cadd1871d682891fb15d623ae77c830b6467..4a0b18921e06a11ec7c0f6e5c6bfd9c10dbd2e1f 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1322,17 +1322,15 @@ DECLARE_EVENT_CLASS(svcrdma_sendcomp_event, TRACE_EVENT(svcrdma_post_send, TP_PROTO( - const struct ib_send_wr *wr, - int status + const struct ib_send_wr *wr ), - TP_ARGS(wr, status), + TP_ARGS(wr), TP_STRUCT__entry( __field(const void *, cqe) __field(unsigned int, num_sge) __field(u32, inv_rkey) - __field(int, status) ), TP_fast_assign( @@ -1340,12 +1338,11 @@ TRACE_EVENT(svcrdma_post_send, __entry->num_sge = wr->num_sge; __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ? wr->ex.invalidate_rkey : 0; - __entry->status = status; ), - TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d", + TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x", __entry->cqe, __entry->num_sge, - __entry->inv_rkey, __entry->status + __entry->inv_rkey ) ); @@ -1410,26 +1407,23 @@ TRACE_EVENT(svcrdma_wc_receive, TRACE_EVENT(svcrdma_post_rw, TP_PROTO( const void *cqe, - int sqecount, - int status + int sqecount ), - TP_ARGS(cqe, sqecount, status), + TP_ARGS(cqe, sqecount), TP_STRUCT__entry( __field(const void *, cqe) __field(int, sqecount) - __field(int, status) ), TP_fast_assign( __entry->cqe = cqe; __entry->sqecount = sqecount; - __entry->status = status; ), - TP_printk("cqe=%p sqecount=%d status=%d", - __entry->cqe, __entry->sqecount, __entry->status + TP_printk("cqe=%p sqecount=%d", + __entry->cqe, __entry->sqecount ) ); @@ -1525,6 +1519,34 @@ DECLARE_EVENT_CLASS(svcrdma_sendqueue_event, DEFINE_SQ_EVENT(full); DEFINE_SQ_EVENT(retry); +TRACE_EVENT(svcrdma_sq_post_err, + TP_PROTO( + const struct svcxprt_rdma *rdma, + int status + ), + + TP_ARGS(rdma, status), + + TP_STRUCT__entry( + __field(int, avail) + __field(int, depth) + __field(int, status) + __string(addr, rdma->sc_xprt.xpt_remotebuf) + ), + + TP_fast_assign( + __entry->avail = atomic_read(&rdma->sc_sq_avail); + __entry->depth = rdma->sc_sq_depth; + __entry->status = status; + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + ), + + TP_printk("addr=%s sc_sq_avail=%d/%d status=%d", + __get_str(addr), __entry->avail, __entry->depth, + __entry->status + ) +); + #endif /* _TRACE_RPCRDMA_H */ #include diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 721ab7e54d96d95afbd895b298729c0e1e172580..25193b3e0b40b1721aee4a5ba4937dfaf5460bd0 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -312,6 +312,30 @@ extern "C" { */ #define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1) +/* + * QTI DX Format + * + * Refers to a DX variant of the base format. + * Implementation may be platform and base-format specific. 
+ */ +#define DRM_FORMAT_MOD_QCOM_DX fourcc_mod_code(QCOM, 0x2) + +/* + * QTI Tight Format + * + * Refers to a tightly packed variant of the base format. + * Implementation may be platform and base-format specific. + */ +#define DRM_FORMAT_MOD_QCOM_TIGHT fourcc_mod_code(QCOM, 0x4) + +/* + * QTI Tile Format + * + * Refers to a tile variant of the base format. + * Implementation may be platform and base-format specific. + */ +#define DRM_FORMAT_MOD_QCOM_TILE fourcc_mod_code(QCOM, 0x8) + /* Vivante framebuffer modifiers */ /* diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 8d67243952f4f104f8b2ca745b4d94aa2565cbcc..9666a1f42e3fd7037d9b78e5b87b3e4623208081 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -116,6 +116,13 @@ extern "C" { #define DRM_MODE_FLAG_PIC_AR_256_135 \ (DRM_MODE_PICTURE_ASPECT_256_135<<19) +#define DRM_MODE_FLAG_SUPPORTS_RGB (1<<27) + +#define DRM_MODE_FLAG_SUPPORTS_YUV (1<<28) +#define DRM_MODE_FLAG_VID_MODE_PANEL (1<<29) +#define DRM_MODE_FLAG_CMD_MODE_PANEL (1<<30) +#define DRM_MODE_FLAG_SEAMLESS (1<<31) + #define DRM_MODE_FLAG_ALL (DRM_MODE_FLAG_PHSYNC | \ DRM_MODE_FLAG_NHSYNC | \ DRM_MODE_FLAG_PVSYNC | \ @@ -128,6 +135,10 @@ extern "C" { DRM_MODE_FLAG_HSKEW | \ DRM_MODE_FLAG_DBLCLK | \ DRM_MODE_FLAG_CLKDIV2 | \ + DRM_MODE_FLAG_SUPPORTS_RGB | \ + DRM_MODE_FLAG_SUPPORTS_YUV | \ + DRM_MODE_FLAG_VID_MODE_PANEL | \ + DRM_MODE_FLAG_CMD_MODE_PANEL | \ DRM_MODE_FLAG_3D_MASK) /* DPMS flags */ @@ -475,6 +486,7 @@ struct drm_mode_fb_cmd { #define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */ #define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */ +#define DRM_MODE_FB_SECURE (1<<2) /* for secure framebuffers */ struct drm_mode_fb_cmd2 { __u32 fb_id; diff --git a/include/uapi/linux/coresight-stm.h b/include/uapi/linux/coresight-stm.h index aac550a52f801de068a707c9f649d31d469c2374..8847dbf241515493478a9707685be55c5a71b134 100644 --- a/include/uapi/linux/coresight-stm.h +++ b/include/uapi/linux/coresight-stm.h @@ -2,8 +2,10 @@ #ifndef __UAPI_CORESIGHT_STM_H_ #define __UAPI_CORESIGHT_STM_H_ -#define STM_FLAG_TIMESTAMPED BIT(3) -#define STM_FLAG_GUARANTEED BIT(7) +#include + +#define STM_FLAG_TIMESTAMPED _BITUL(3) +#define STM_FLAG_GUARANTEED _BITUL(7) /* * The CoreSight STM supports guaranteed and invariant timing diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 61a5799b440b9f66758be86f4b20f6be6e877387..7a03c1534694d61ed2c67c3a871446dc29b0e33f 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -795,7 +795,11 @@ #define SW_LINEIN_INSERT 0x0d /* set = inserted */ #define SW_MUTE_DEVICE 0x0e /* set = device disabled */ #define SW_PEN_INSERTED 0x0f /* set = pen inserted */ -#define SW_MAX 0x0f +#define SW_HPHL_OVERCURRENT 0x10 /* set = over current on left hph */ +#define SW_HPHR_OVERCURRENT 0x11 /* set = over current on right hph */ +#define SW_MICROPHONE2_INSERT 0x12 /* set = inserted */ +#define SW_UNSUPPORT_INSERT 0x13 /* set = unsupported device inserted */ +#define SW_MAX 0x20 #define SW_CNT (SW_MAX+1) /* diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h index e42d13b55cf3acafd90f0ec6d13ebd685ad3f684..d3a717d80a15a5aae2867b4bb1785d389d017707 100644 --- a/include/uapi/linux/ip.h +++ b/include/uapi/linux/ip.h @@ -169,6 +169,7 @@ enum IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST, IPV4_DEVCONF_DROP_GRATUITOUS_ARP, IPV4_DEVCONF_BC_FORWARDING, + IPV4_DEVCONF_VENDOR_PADDING, __IPV4_DEVCONF_MAX }; diff 
--git a/include/uapi/linux/limits.h b/include/uapi/linux/limits.h index c3547f07605c9e2ca28e4ced0fbba545b55d21a6..6bcbe306876196e53064dcd2f4099696208a1a48 100644 --- a/include/uapi/linux/limits.h +++ b/include/uapi/linux/limits.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _LINUX_LIMITS_H -#define _LINUX_LIMITS_H +#ifndef _UAPI_LINUX_LIMITS_H +#define _UAPI_LINUX_LIMITS_H #define NR_OPEN 1024 diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 9913087bc3c356721c33c32d83925b228fa19f19..e98d90b5e6a418889b652025b272172c3e3d8c60 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1038,6 +1038,43 @@ * %NL80211_ATTR_CHANNEL_WIDTH,%NL80211_ATTR_NSS attributes with its * address(specified in %NL80211_ATTR_MAC). * + * @NL80211_CMD_GET_FTM_RESPONDER_STATS: Retrieve FTM responder statistics, in + * the %NL80211_ATTR_FTM_RESPONDER_STATS attribute. + * + * @NL80211_CMD_PEER_MEASUREMENT_START: start a (set of) peer measurement(s) + * with the given parameters, which are encapsulated in the nested + * %NL80211_ATTR_PEER_MEASUREMENTS attribute. Optionally, MAC address + * randomization may be enabled and configured by specifying the + * %NL80211_ATTR_MAC and %NL80211_ATTR_MAC_MASK attributes. + * If a timeout is requested, use the %NL80211_ATTR_TIMEOUT attribute. + * A u64 cookie for further %NL80211_ATTR_COOKIE use is is returned in + * the netlink extended ack message. + * + * To cancel a measurement, close the socket that requested it. + * + * Measurement results are reported to the socket that requested the + * measurement using @NL80211_CMD_PEER_MEASUREMENT_RESULT when they + * become available, so applications must ensure a large enough socket + * buffer size. + * + * Depending on driver support it may or may not be possible to start + * multiple concurrent measurements. + * @NL80211_CMD_PEER_MEASUREMENT_RESULT: This command number is used for the + * result notification from the driver to the requesting socket. + * @NL80211_CMD_PEER_MEASUREMENT_COMPLETE: Notification only, indicating that + * the measurement completed, using the measurement cookie + * (%NL80211_ATTR_COOKIE). + * + * @NL80211_CMD_NOTIFY_RADAR: Notify the kernel that a radar signal was + * detected and reported by a neighboring device on the channel + * indicated by %NL80211_ATTR_WIPHY_FREQ and other attributes + * determining the width and type. + * + * @NL80211_CMD_UPDATE_OWE_INFO: This interface allows the host driver to + * offload OWE processing to user space. This intends to support + * OWE AKM by the host drivers that implement SME but rely + * on the user space for the cryptographic/DH IE processing in AP mode. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1250,6 +1287,16 @@ enum nl80211_commands { NL80211_CMD_CONTROL_PORT_FRAME, + NL80211_CMD_GET_FTM_RESPONDER_STATS, + + NL80211_CMD_PEER_MEASUREMENT_START, + NL80211_CMD_PEER_MEASUREMENT_RESULT, + NL80211_CMD_PEER_MEASUREMENT_COMPLETE, + + NL80211_CMD_NOTIFY_RADAR, + + NL80211_CMD_UPDATE_OWE_INFO, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -2292,6 +2339,18 @@ enum nl80211_commands { * the allowed channel bandwidth configurations. (u8 attribute) * Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13. * + * @NL80211_ATTR_VLAN_ID: VLAN ID (1..4094) for the station and VLAN group key + * (u16). + * + * @NL80211_ATTR_HE_BSS_COLOR: nested attribute for BSS Color Settings. 
+ * + * @NL80211_ATTR_IFTYPE_AKM_SUITES: nested array attribute, with each entry + * using attributes from &enum nl80211_iftype_akm_attributes. This + * attribute is sent in a response to %NL80211_CMD_GET_WIPHY indicating + * supported AKM suites capability per interface. AKMs advertised in + * %NL80211_ATTR_AKM_SUITES are default capabilities if AKM suites not + * advertised for a specific interface type. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2754,6 +2813,12 @@ enum nl80211_attrs { NL80211_ATTR_WIPHY_EDMG_CHANNELS, NL80211_ATTR_WIPHY_EDMG_BW_CONFIG, + NL80211_ATTR_VLAN_ID, + + NL80211_ATTR_HE_BSS_COLOR, + + NL80211_ATTR_IFTYPE_AKM_SUITES, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3124,6 +3189,12 @@ enum nl80211_sta_bss_param { * @NL80211_STA_INFO_ACK_SIGNAL: signal strength of the last ACK frame(u8, dBm) * @NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG: avg signal strength of (data) * ACK frame (s8, dBm) + * @NL80211_STA_INFO_RX_MPDUS: total number of received packets (MPDUs) + * (u32, from this station) + * @NL80211_STA_INFO_FCS_ERROR_COUNT: total number of packets (MPDUs) received + * with an FCS error (u32, from this station). This count may not include + * some packets with an FCS error due to TA corruption. Hence this counter + * might not be fully accurate. * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -3164,6 +3235,8 @@ enum nl80211_sta_info { NL80211_STA_INFO_PAD, NL80211_STA_INFO_ACK_SIGNAL, NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG, + NL80211_STA_INFO_RX_MPDUS, + NL80211_STA_INFO_FCS_ERROR_COUNT, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, @@ -4352,6 +4425,10 @@ enum nl80211_key_default_types { * @NL80211_KEY_DEFAULT_TYPES: A nested attribute containing flags * attributes, specifying what a key should be set as default as. * See &enum nl80211_key_default_types. + * @NL80211_KEY_MODE: the mode from enum nl80211_key_mode. + * Defaults to @NL80211_KEY_RX_TX. + * @NL80211_KEY_DEFAULT_BEACON: flag indicating default Beacon frame key + * * @__NL80211_KEY_AFTER_LAST: internal * @NL80211_KEY_MAX: highest key attribute */ @@ -4365,6 +4442,8 @@ enum nl80211_key_attributes { NL80211_KEY_DEFAULT_MGMT, NL80211_KEY_TYPE, NL80211_KEY_DEFAULT_TYPES, + NL80211_KEY_MODE, + NL80211_KEY_DEFAULT_BEACON, /* keep last */ __NL80211_KEY_AFTER_LAST, @@ -5304,6 +5383,40 @@ enum nl80211_feature_flags { * @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data * except for supported rates from the probe request content if requested * by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag. + * @NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER: Driver supports enabling fine + * timing measurement responder role. + * + * @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0: Driver/device confirm that they are + * able to rekey an in-use key correctly. Userspace must not rekey PTK keys + * if this flag is not set. Ignoring this can leak clear text packets and/or + * freeze the connection. + * + * @NL80211_EXT_FEATURE_AIRTIME_FAIRNESS: Driver supports getting airtime + * fairness for transmitted packets and has enabled airtime fairness + * scheduling. + * + * @NL80211_EXT_FEATURE_AP_PMKSA_CACHING: Driver/device supports PMKSA caching + * (set/del PMKSA operations) in AP mode. 
+ * + * @NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD: Driver supports + * filtering of sched scan results using band specific RSSI thresholds. + * + * @NL80211_EXT_FEATURE_STA_TX_PWR: This driver supports controlling tx power + * to a station. + * + * @NL80211_EXT_FEATURE_SAE_OFFLOAD: Device wants to do SAE authentication in + * station mode (SAE password is passed as part of the connect command). + * + * @NL80211_EXT_FEATURE_VLAN_OFFLOAD: The driver supports a single netdev + * with VLAN tagged frames and separate VLAN-specific netdevs added using + * vconfig similarly to the Ethernet case. + * + * @NL80211_EXT_FEATURE_AQL: The driver supports the Airtime Queue Limit (AQL) + * feature, which prevents bufferbloat by using the expected transmission + * time to limit the amount of data buffered in the hardware. + * + * @NL80211_EXT_FEATURE_BEACON_PROTECTION: The driver supports Beacon protection + * and can receive key configuration for BIGTK using key indexes 6 and 7. * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. @@ -5340,6 +5453,17 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_TXQS, NL80211_EXT_FEATURE_SCAN_RANDOM_SN, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT, + NL80211_EXT_FEATURE_CAN_REPLACE_PTK0, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER, + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS, + NL80211_EXT_FEATURE_AP_PMKSA_CACHING, + NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD, + NL80211_EXT_FEATURE_EXT_KEY_ID, + NL80211_EXT_FEATURE_STA_TX_PWR, + NL80211_EXT_FEATURE_SAE_OFFLOAD, + NL80211_EXT_FEATURE_VLAN_OFFLOAD, + NL80211_EXT_FEATURE_AQL, + NL80211_EXT_FEATURE_BEACON_PROTECTION, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, @@ -5884,4 +6008,28 @@ enum nl80211_external_auth_action { NL80211_EXTERNAL_AUTH_ABORT, }; +/** + * enum nl80211_iftype_akm_attributes - interface type AKM attributes + * @__NL80211_IFTYPE_AKM_ATTR_INVALID: Invalid + * + * @NL80211_IFTYPE_AKM_ATTR_IFTYPES: nested attribute containing a flag + * attribute for each interface type that supports AKM suites specified in + * %NL80211_IFTYPE_AKM_ATTR_SUITES + * @NL80211_IFTYPE_AKM_ATTR_SUITES: an array of u32. Used to indicate supported + * AKM suites for the specified interface types. + * + * @__NL80211_IFTYPE_AKM_ATTR_LAST: Internal + * @NL80211_IFTYPE_AKM_ATTR_MAX: highest interface type AKM attribute. 
+ */ +enum nl80211_iftype_akm_attributes { + __NL80211_IFTYPE_AKM_ATTR_INVALID, + + NL80211_IFTYPE_AKM_ATTR_IFTYPES, + NL80211_IFTYPE_AKM_ATTR_SUITES, + + /* keep last */ + __NL80211_IFTYPE_AKM_ATTR_LAST, + NL80211_IFTYPE_AKM_ATTR_MAX = __NL80211_IFTYPE_AKM_ATTR_LAST - 1, +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h index 23cd84868cc3bd8907994664c82407ca49a1d8d9..7272f85d6d6ab5e24dd806e7f88760eaafcd7326 100644 --- a/include/uapi/linux/swab.h +++ b/include/uapi/linux/swab.h @@ -4,6 +4,7 @@ #include #include +#include #include /* @@ -132,6 +133,15 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val) __fswab64(x)) #endif +static __always_inline unsigned long __swab(const unsigned long y) +{ +#if __BITS_PER_LONG == 64 + return __swab64(y); +#else /* __BITS_PER_LONG == 32 */ + return __swab32(y); +#endif +} + /** * __swahw32 - return a word-swapped 32-bit value * @x: value to wordswap diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index d5a5caec8fbcaf6df65b9aa5191561e8e9484428..321c7c16656edfc72fe143ba7a850c8f0e62d37f 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -1081,6 +1081,26 @@ struct usb_ptm_cap_descriptor { */ #define USB_DT_USB_SSP_CAP_SIZE(ssac) (12 + (ssac + 1) * 4) +/* + * Configuration Summary descriptors: Defines a list of device preferred + * configurations. This descriptor may be used by Host software to decide + * which Configuration to use to obtain the desired functionality. + */ +#define USB_CAP_TYPE_CONFIG_SUMMARY 0x10 +#define USB_CONFIG_SUMMARY_DESC_REV 0x100 + +struct usb_config_summary_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u16 bcdVersion; + __u8 bClass; + __u8 bSubClass; + __u8 bProtocol; + __u8 bConfigurationCount; + __u8 bConfigurationIndex[]; +} __attribute__((packed)); + /*-------------------------------------------------------------------------*/ /* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 1aae2e4b8f1021c2aa23770e2e83c1649bd79122..f4742d0ffdeb413db1aa763abe94e51218f8a634 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -70,7 +70,7 @@ * Common stuff for both V4L1 and V4L2 * Moved from videodev.h */ -#define VIDEO_MAX_FRAME 32 +#define VIDEO_MAX_FRAME 64 #define VIDEO_MAX_PLANES 8 /* @@ -564,6 +564,14 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */ #define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */ +/* NV12_512 8-bit Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV12_512 v4l2_fourcc('Q', '5', '1', '2') + +/* UBWC 8-bit Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV12_UBWC v4l2_fourcc('Q', '1', '2', '8') +/* UBWC 10-bit Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV12_TP10_UBWC v4l2_fourcc('Q', '1', '2', 'A') + /* two non contiguous planes - one Y, one Cr + Cb interleaved */ #define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */ #define V4L2_PIX_FMT_NV21M v4l2_fourcc('N', 'M', '2', '1') /* 21 Y/CrCb 4:2:0 */ @@ -693,6 +701,8 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_IPU3_SGBRG10 v4l2_fourcc('i', 'p', '3', 'g') /* IPU3 packed 10-bit GBRG bayer */ #define V4L2_PIX_FMT_IPU3_SGRBG10 v4l2_fourcc('i', 'p', '3', 'G') /* IPU3 packed 10-bit GRBG bayer */ #define V4L2_PIX_FMT_IPU3_SRGGB10 v4l2_fourcc('i', 'p', '3', 'r') /* IPU3 packed 10-bit RGGB bayer */ 
+#define V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS \ + v4l2_fourcc('Q', 'P', '1', '0') /* Y/CbCr 4:2:0 P10 Venus*/ /* SDR formats - used only for Software Defined Radio devices */ #define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */ diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h index 8e88eba1fa7a1c457d519a62f1dd51747cfad504..0c85914d936929098778386fef0daca8345ccb9c 100644 --- a/include/uapi/linux/virtio_gpu.h +++ b/include/uapi/linux/virtio_gpu.h @@ -40,8 +40,16 @@ #include -#define VIRTIO_GPU_F_VIRGL 0 -#define VIRTIO_GPU_F_EDID 1 +/* + * VIRTIO_GPU_CMD_CTX_* + * VIRTIO_GPU_CMD_*_3D + */ +#define VIRTIO_GPU_F_VIRGL 0 + +/* + * VIRTIO_GPU_CMD_GET_EDID + */ +#define VIRTIO_GPU_F_EDID 1 enum virtio_gpu_ctrl_type { VIRTIO_GPU_UNDEFINED = 0, diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h index db5edf3f827b774120af1b8de757481b6b99af19..eac1e0166d268cfcee61d067db250de94da50c2e 100644 --- a/include/uapi/sound/compress_offload.h +++ b/include/uapi/sound/compress_offload.h @@ -122,6 +122,18 @@ struct snd_compr_codec_caps { struct snd_codec_desc descriptor[MAX_NUM_CODEC_DESCRIPTORS]; } __attribute__((packed, aligned(4))); +/** + * struct snd_compr_audio_info: compressed input audio information + * @frame_size: length of the encoded frame with valid data + * @reserved: reserved for future use + */ +struct snd_compr_audio_info { + __u32 frame_size; + __u32 reserved[15]; +} __attribute__((packed, aligned(4))); + +#define SND_COMPR_AUDIO_INFO + /** * enum sndrv_compress_encoder * @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the * @@ -160,6 +172,8 @@ struct snd_compr_metadata { * SNDRV_COMPRESS_STOP: stop a running stream, discarding ring buffer content * and the buffers currently with DSP * SNDRV_COMPRESS_DRAIN: Play till end of buffers and stop after that + * SNDRV_COMPRESS_SET_NEXT_TRACK_PARAM: send codec specific data for the next + * track in gapless * SNDRV_COMPRESS_IOCTL_VERSION: Query the API version */ #define SNDRV_COMPRESS_IOCTL_VERSION _IOR('C', 0x00, int) @@ -181,6 +195,8 @@ struct snd_compr_metadata { #define SNDRV_COMPRESS_DRAIN _IO('C', 0x34) #define SNDRV_COMPRESS_NEXT_TRACK _IO('C', 0x35) #define SNDRV_COMPRESS_PARTIAL_DRAIN _IO('C', 0x36) +#define SNDRV_COMPRESS_SET_NEXT_TRACK_PARAM\ + _IOW('C', 0x80, union snd_codec_options) /* * TODO * 1.
add mmap support diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h index 3d4d6de66a17ea91b18cb0ed7d1ba93d1df7367c..7e503c91cbf8bb7a53ee648c2cb95cbc26cbd1b7 100644 --- a/include/uapi/sound/compress_params.h +++ b/include/uapi/sound/compress_params.h @@ -54,12 +54,34 @@ #include +/* Maximum PCM channels */ +#define MAX_PCM_DECODE_CHANNELS 32 + /* AUDIO CODECS SUPPORTED */ #define MAX_NUM_CODECS 32 #define MAX_NUM_CODEC_DESCRIPTORS 32 #define MAX_NUM_BITRATES 32 #define MAX_NUM_SAMPLE_RATES 32 +/* compressed TX */ +#define MAX_NUM_FRAMES_PER_BUFFER 1 +#define COMPRESSED_META_DATA_MODE 0x10 +#define META_DATA_LEN_BYTES 36 +#define Q6_AC3_DECODER 0x00010BF6 +#define Q6_EAC3_DECODER 0x00010C3C +#define Q6_DTS 0x00010D88 +#define Q6_DTS_LBR 0x00010DBB + +/* Timestamp flag */ +/* Bit-0 - 1 : Enable Timestamp mode */ +/* Bit-0 - 0 : Disable Timestamp mode */ +#define COMPRESSED_TIMESTAMP_FLAG 0x0001 + +/* Perf mode flag */ +/* Bit-1 - 1 : Enable perf mode */ +/* Bit-1 - 0 : Disable perf mode */ +#define COMPRESSED_PERF_MODE_FLAG 0x0002 + /* Codecs are listed linearly to allow for extensibility */ #define SND_AUDIOCODEC_PCM ((__u32) 0x00000001) #define SND_AUDIOCODEC_MP3 ((__u32) 0x00000002) @@ -75,7 +97,25 @@ #define SND_AUDIOCODEC_G723_1 ((__u32) 0x0000000C) #define SND_AUDIOCODEC_G729 ((__u32) 0x0000000D) #define SND_AUDIOCODEC_BESPOKE ((__u32) 0x0000000E) -#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_BESPOKE +#define SND_AUDIOCODEC_DTS_PASS_THROUGH ((__u32) 0x0000000F) +#define SND_AUDIOCODEC_DTS_LBR ((__u32) 0x00000010) +#define SND_AUDIOCODEC_DTS_TRANSCODE_LOOPBACK ((__u32) 0x00000011) +#define SND_AUDIOCODEC_PASS_THROUGH ((__u32) 0x00000012) +#define SND_AUDIOCODEC_MP2 ((__u32) 0x00000013) +#define SND_AUDIOCODEC_DTS_LBR_PASS_THROUGH ((__u32) 0x00000014) +#define SND_AUDIOCODEC_AC3 ((__u32) 0x00000015) +#define SND_AUDIOCODEC_AC3_PASS_THROUGH ((__u32) 0x00000016) +#define SND_AUDIOCODEC_WMA_PRO ((__u32) 0x00000017) +#define SND_AUDIOCODEC_DTS ((__u32) 0x00000018) +#define SND_AUDIOCODEC_EAC3 ((__u32) 0x00000019) +#define SND_AUDIOCODEC_ALAC ((__u32) 0x00000020) +#define SND_AUDIOCODEC_APE ((__u32) 0x00000021) +#define SND_AUDIOCODEC_DSD ((__u32) 0x00000022) +#define SND_AUDIOCODEC_APTX ((__u32) 0x00000023) +#define SND_AUDIOCODEC_TRUEHD ((__u32) 0x00000024) +#define SND_AUDIOCODEC_MAT ((__u32) 0x00000025) +#define SND_AUDIOCODEC_THD ((__u32) 0x00000026) +#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_THD /* * Profile and modes are listed with bit masks.
This allows for a @@ -241,8 +281,15 @@ struct snd_enc_wma { __u32 super_block_align; /* WMA Type-specific data */ + __u32 bits_per_sample; + __u32 channelmask; + __u32 encodeopt; + __u32 encodeopt1; + __u32 encodeopt2; + __u32 avg_bit_rate; }; +#define SND_ENC_WMA_EXTENTED_SUPPORT /** * struct snd_enc_vorbis @@ -317,13 +364,96 @@ struct snd_enc_generic { __s32 reserved[15]; /* Can be used for SND_AUDIOCODEC_BESPOKE */ } __attribute__((packed, aligned(4))); +#define SND_DEC_THD_MAX_PARAMS 8 +struct snd_dec_thd { + __u32 params_length; + __u32 params_id[SND_DEC_THD_MAX_PARAMS]; + __u32 params_value[SND_DEC_THD_MAX_PARAMS]; +} __attribute__((packed, aligned(4))); + +struct snd_dec_flac { + __u16 sample_size; + __u16 min_blk_size; + __u16 max_blk_size; + __u16 min_frame_size; + __u16 max_frame_size; +} __attribute__((packed, aligned(4))); + +#define SND_DEC_FLAC_SUPPORTED + +struct snd_dec_vorbis { + __u32 bit_stream_fmt; +}; + +struct snd_dec_alac { + __u32 frame_length; + __u8 compatible_version; + __u8 bit_depth; + __u8 pb; + __u8 mb; + __u8 kb; + __u8 num_channels; + __u16 max_run; + __u32 max_frame_bytes; + __u32 avg_bit_rate; + __u32 sample_rate; + __u32 channel_layout_tag; +}; + +struct snd_dec_ape { + __u16 compatible_version; + __u16 compression_level; + __u32 format_flags; + __u32 blocks_per_frame; + __u32 final_frame_blocks; + __u32 total_frames; + __u16 bits_per_sample; + __u16 num_channels; + __u32 sample_rate; + __u32 seek_table_present; +}; + +struct snd_dec_aptx { + __u32 lap; + __u32 uap; + __u32 nap; +}; + +/** struct snd_dec_dsd - codec for DSD format + * @blk_size - dsd channel block size + */ +struct snd_dec_dsd { + __u32 blk_size; +}; + +/** struct snd_dec_pcm - codec options for PCM format + * @num_channels: Number of channels + * @ch_map: Channel map for the above corresponding channels + */ +struct snd_dec_pcm { + __u32 num_channels; + __u8 ch_map[MAX_PCM_DECODE_CHANNELS]; +} __attribute__((packed, aligned(4))); + +struct snd_dec_amrwb_plus { + __u32 bit_stream_fmt; +}; union snd_codec_options { struct snd_enc_wma wma; struct snd_enc_vorbis vorbis; struct snd_enc_real real; struct snd_enc_flac flac; struct snd_enc_generic generic; -} __attribute__((packed, aligned(4))); + struct snd_dec_flac flac_dec; + struct snd_dec_vorbis vorbis_dec; + struct snd_dec_alac alac; + struct snd_dec_ape ape; + struct snd_dec_aptx aptx_dec; + struct snd_dec_thd truehd; + struct snd_dec_pcm pcm_dec; + struct snd_dec_amrwb_plus amrwbplus; + struct snd_dec_dsd dsd_dec; +}; /** struct snd_codec_desc - description of codec capabilities * @max_ch: Maximum number of audio channels @@ -384,6 +514,7 @@ struct snd_codec_desc { * @align: Block alignment in bytes of an audio sample. * Only required for PCM or IEC formats. * @options: encoder-specific settings + * @compr_passthr: compressed bitstream passthrough * @reserved: reserved for future use */ @@ -400,7 +531,26 @@ struct snd_codec { __u32 format; __u32 align; union snd_codec_options options; - __u32 reserved[3]; + __u32 compr_passthr; + __u32 flags; + __u32 reserved[1]; } __attribute__((packed, aligned(4))); +#define SND_CODEC_COMPRESS_PASSTHROUGH + +/** struct snd_codec_metadata + * @length: Length of the encoded buffer. + * @offset: Offset from the buffer address to the first byte of the first + * encoded frame. All encoded frames are consecutive starting + * from this offset. + * @timestamp: Session time in microseconds of the first sample in the buffer. + * @reserved: Reserved for future use. 
+ */ +struct snd_codec_metadata { + __u32 length; + __u32 offset; + __u64 timestamp; + __u32 reserved[4]; +}; + #endif diff --git a/include/vdso/bits.h b/include/vdso/bits.h new file mode 100644 index 0000000000000000000000000000000000000000..3276a43ff1826a1ba9254074a0fef4acf76169f6 --- /dev/null +++ b/include/vdso/bits.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_BITS_H +#define __VDSO_BITS_H + +#define BIT(nr) (1UL << (nr)) + +#endif /* __VDSO_BITS_H */ diff --git a/include/vdso/clocksource.h b/include/vdso/clocksource.h new file mode 100644 index 0000000000000000000000000000000000000000..c682e7c60273af1d130f375f4af4bf070e5041d0 --- /dev/null +++ b/include/vdso/clocksource.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_CLOCKSOURCE_H +#define __VDSO_CLOCKSOURCE_H + +#include + +#ifdef CONFIG_GENERIC_GETTIMEOFDAY +#include +#endif /* CONFIG_GENERIC_GETTIMEOFDAY */ + +enum vdso_clock_mode { + VDSO_CLOCKMODE_NONE, +#ifdef CONFIG_GENERIC_GETTIMEOFDAY + VDSO_ARCH_CLOCKMODES, +#endif + VDSO_CLOCKMODE_MAX, + + /* Indicator for time namespace VDSO */ + VDSO_CLOCKMODE_TIMENS = INT_MAX +}; + +#endif /* __VDSO_CLOCKSOURCE_H */ diff --git a/include/vdso/const.h b/include/vdso/const.h new file mode 100644 index 0000000000000000000000000000000000000000..94b385ad438d3c0e1f120c89a9147cdbac1166b3 --- /dev/null +++ b/include/vdso/const.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_CONST_H +#define __VDSO_CONST_H + +#include + +#define UL(x) (_UL(x)) +#define ULL(x) (_ULL(x)) + +#endif /* __VDSO_CONST_H */ diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h new file mode 100644 index 0000000000000000000000000000000000000000..925c2acab6a6409c9a775bd0de505a78e8112942 --- /dev/null +++ b/include/vdso/datapage.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_DATAPAGE_H +#define __VDSO_DATAPAGE_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define VDSO_BASES (CLOCK_TAI + 1) +#define VDSO_HRES (BIT(CLOCK_REALTIME) | \ + BIT(CLOCK_MONOTONIC) | \ + BIT(CLOCK_BOOTTIME) | \ + BIT(CLOCK_TAI)) +#define VDSO_COARSE (BIT(CLOCK_REALTIME_COARSE) | \ + BIT(CLOCK_MONOTONIC_COARSE)) +#define VDSO_RAW (BIT(CLOCK_MONOTONIC_RAW)) + +#define CS_HRES_COARSE 0 +#define CS_RAW 1 +#define CS_BASES (CS_RAW + 1) + +/** + * struct vdso_timestamp - basetime per clock_id + * @sec: seconds + * @nsec: nanoseconds + * + * There is one vdso_timestamp object in vvar for each vDSO-accelerated + * clock_id. For high-resolution clocks, this encodes the time + * corresponding to vdso_data.cycle_last. For coarse clocks this encodes + * the actual time. + * + * To be noticed that for highres clocks nsec is left-shifted by + * vdso_data.cs[x].shift. 
+ */ +struct vdso_timestamp { + u64 sec; + u64 nsec; +}; + +/** + * struct vdso_data - vdso datapage representation + * @seq: timebase sequence counter + * @clock_mode: clock mode + * @cycle_last: timebase at clocksource init + * @mask: clocksource mask + * @mult: clocksource multiplier + * @shift: clocksource shift + * @basetime[clock_id]: basetime per clock_id + * @tz_minuteswest: minutes west of Greenwich + * @tz_dsttime: type of DST correction + * @hrtimer_res: hrtimer resolution + * @__unused: unused + * + * vdso_data will be accessed by 64 bit and compat code at the same time + * so we should be careful before modifying this structure. + */ +struct vdso_data { + u32 seq; + + s32 clock_mode; + u64 cycle_last; + u64 mask; + u32 mult; + u32 shift; + + struct vdso_timestamp basetime[VDSO_BASES]; + + s32 tz_minuteswest; + s32 tz_dsttime; + u32 hrtimer_res; + u32 __unused; +}; + +/* + * We use the hidden visibility to prevent the compiler from generating a GOT + * relocation. Not only is going through a GOT useless (the entry couldn't and + * must not be overridden by another library), it does not even work: the linker + * cannot generate an absolute address to the data page. + * + * With the hidden visibility, the compiler simply generates a PC-relative + * relocation, and this is what we need. + */ +extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden"))); + +/* + * The generic vDSO implementation requires that gettimeofday.h + * provides: + * - __arch_get_vdso_data(): to get the vdso datapage. + * - __arch_get_hw_counter(): to get the hw counter based on the + * clock_mode. + * - gettimeofday_fallback(): fallback for gettimeofday. + * - clock_gettime_fallback(): fallback for clock_gettime. + * - clock_getres_fallback(): fallback for clock_getres. + */ +#ifdef ENABLE_COMPAT_VDSO +#include +#else +#include +#endif /* ENABLE_COMPAT_VDSO */ + +#endif /* !__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* __VDSO_DATAPAGE_H */ diff --git a/include/vdso/helpers.h b/include/vdso/helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..01641dbb68ef09cf106746bea5722f740548b314 --- /dev/null +++ b/include/vdso/helpers.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_HELPERS_H +#define __VDSO_HELPERS_H + +#ifndef __ASSEMBLY__ + +#include + +static __always_inline u32 vdso_read_begin(const struct vdso_data *vd) +{ + u32 seq; + + while ((seq = READ_ONCE(vd->seq)) & 1) + cpu_relax(); + + smp_rmb(); + return seq; +} + +static __always_inline u32 vdso_read_retry(const struct vdso_data *vd, + u32 start) +{ + u32 seq; + + smp_rmb(); + seq = READ_ONCE(vd->seq); + return seq != start; +} + +static __always_inline void vdso_write_begin(struct vdso_data *vd) +{ + /* + * WRITE_ONCE it is required otherwise the compiler can validly tear + * updates to vd[x].seq and it is possible that the value seen by the + * reader it is inconsistent. + */ + WRITE_ONCE(vd[CS_HRES_COARSE].seq, vd[CS_HRES_COARSE].seq + 1); + WRITE_ONCE(vd[CS_RAW].seq, vd[CS_RAW].seq + 1); + smp_wmb(); +} + +static __always_inline void vdso_write_end(struct vdso_data *vd) +{ + smp_wmb(); + /* + * WRITE_ONCE it is required otherwise the compiler can validly tear + * updates to vd[x].seq and it is possible that the value seen by the + * reader it is inconsistent. 
+ */ + WRITE_ONCE(vd[CS_HRES_COARSE].seq, vd[CS_HRES_COARSE].seq + 1); + WRITE_ONCE(vd[CS_RAW].seq, vd[CS_RAW].seq + 1); +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* __VDSO_HELPERS_H */ diff --git a/include/vdso/jiffies.h b/include/vdso/jiffies.h new file mode 100644 index 0000000000000000000000000000000000000000..2f9d596c8b2977faab39624eb43901fa4280432c --- /dev/null +++ b/include/vdso/jiffies.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_JIFFIES_H +#define __VDSO_JIFFIES_H + +#include /* for HZ */ +#include + +/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ +#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ) + +#endif /* __VDSO_JIFFIES_H */ diff --git a/include/vdso/ktime.h b/include/vdso/ktime.h new file mode 100644 index 0000000000000000000000000000000000000000..a0fd07239e0e8400dc7715ef55f44ca3e61fd43b --- /dev/null +++ b/include/vdso/ktime.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_KTIME_H +#define __VDSO_KTIME_H + +#include + +/* + * The resolution of the clocks. The resolution value is returned in + * the clock_getres() system call to give application programmers an + * idea of the (in)accuracy of timers. Timer values are rounded up to + * this resolution values. + */ +#define LOW_RES_NSEC TICK_NSEC +#define KTIME_LOW_RES (LOW_RES_NSEC) + +#endif /* __VDSO_KTIME_H */ diff --git a/include/vdso/limits.h b/include/vdso/limits.h new file mode 100644 index 0000000000000000000000000000000000000000..0197888ad0e00b2f853d3f25ffa764f61cca7385 --- /dev/null +++ b/include/vdso/limits.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_LIMITS_H +#define __VDSO_LIMITS_H + +#define USHRT_MAX ((unsigned short)~0U) +#define SHRT_MAX ((short)(USHRT_MAX >> 1)) +#define SHRT_MIN ((short)(-SHRT_MAX - 1)) +#define INT_MAX ((int)(~0U >> 1)) +#define INT_MIN (-INT_MAX - 1) +#define UINT_MAX (~0U) +#define LONG_MAX ((long)(~0UL >> 1)) +#define LONG_MIN (-LONG_MAX - 1) +#define ULONG_MAX (~0UL) +#define LLONG_MAX ((long long)(~0ULL >> 1)) +#define LLONG_MIN (-LLONG_MAX - 1) +#define ULLONG_MAX (~0ULL) +#define UINTPTR_MAX ULONG_MAX + +#endif /* __VDSO_LIMITS_H */ diff --git a/include/vdso/math64.h b/include/vdso/math64.h new file mode 100644 index 0000000000000000000000000000000000000000..7da703ee55614a5fd7a0c7bd826dd034d463662b --- /dev/null +++ b/include/vdso/math64.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_MATH64_H +#define __VDSO_MATH64_H + +static __always_inline u32 +__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) +{ + u32 ret = 0; + + while (dividend >= divisor) { + /* The following asm() prevents the compiler from + optimising this loop into a modulo operation. */ + asm("" : "+rm"(dividend)); + + dividend -= divisor; + ret++; + } + + *remainder = dividend; + + return ret; +} + +#endif /* __VDSO_MATH64_H */ diff --git a/include/vdso/processor.h b/include/vdso/processor.h new file mode 100644 index 0000000000000000000000000000000000000000..fbe8265ea3c49e0df1eeb59462d77cd1f652a707 --- /dev/null +++ b/include/vdso/processor.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 ARM Ltd. 
+ */ +#ifndef __VDSO_PROCESSOR_H +#define __VDSO_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +#include + +#endif /* __ASSEMBLY__ */ + +#endif /* __VDSO_PROCESSOR_H */ diff --git a/include/vdso/time.h b/include/vdso/time.h new file mode 100644 index 0000000000000000000000000000000000000000..e5afa84b47b41d762e02e495205fff1b8e880d24 --- /dev/null +++ b/include/vdso/time.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_TIME_H +#define __VDSO_TIME_H + +#endif /* __VDSO_TIME_H */ diff --git a/include/vdso/time32.h b/include/vdso/time32.h new file mode 100644 index 0000000000000000000000000000000000000000..63fa969f736a64ff38c8bf616cc946eef1f329b3 --- /dev/null +++ b/include/vdso/time32.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_TIME32_H +#define __VDSO_TIME32_H + +/* + * New aliases for compat time functions. These will be used to replace + * the compat code so it can be shared between 32-bit and 64-bit builds + * both of which provide compatibility with old 32-bit tasks. + */ +#define old_time32_t compat_time_t +#define old_timeval32 compat_timeval +#define old_timespec32 compat_timespec + +#endif /* __VDSO_TIME32_H */ diff --git a/include/vdso/time64.h b/include/vdso/time64.h new file mode 100644 index 0000000000000000000000000000000000000000..9d43c3f5e89d459ef0e5148b3dff004096a959ac --- /dev/null +++ b/include/vdso/time64.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_TIME64_H +#define __VDSO_TIME64_H + +/* Parameters used to convert the timespec values: */ +#define MSEC_PER_SEC 1000L +#define USEC_PER_MSEC 1000L +#define NSEC_PER_USEC 1000L +#define NSEC_PER_MSEC 1000000L +#define USEC_PER_SEC 1000000L +#define NSEC_PER_SEC 1000000000L +#define FSEC_PER_SEC 1000000000000000LL + +#endif /* __VDSO_TIME64_H */ diff --git a/include/vdso/vsyscall.h b/include/vdso/vsyscall.h new file mode 100644 index 0000000000000000000000000000000000000000..2c6134e0c23d17dd8fda9c83c4763ab79c968e59 --- /dev/null +++ b/include/vdso/vsyscall.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_VSYSCALL_H +#define __VDSO_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#include + +#endif /* !__ASSEMBLY__ */ + +#endif /* __VDSO_VSYSCALL_H */ diff --git a/init/Kconfig.gki b/init/Kconfig.gki index 82d0d65b373fdb1a08f28e256ae413ff9736f147..6d0c4fd140f1a6a692de7ce2d023ce28fb07ab00 100644 --- a/init/Kconfig.gki +++ b/init/Kconfig.gki @@ -3,6 +3,7 @@ config GKI_HIDDEN_DRM_CONFIGS select DRM_KMS_HELPER if (HAS_IOMEM && DRM) select DRM_GEM_CMA_HELPER select DRM_MIPI_DSI + select DRM_TTM if (HAS_IOMEM && DRM) select VIDEOMODE_HELPERS help Dummy config option used to enable hidden DRM configs. @@ -39,6 +40,7 @@ config GKI_HIDDEN_SND_CONFIGS config GKI_HIDDEN_SND_SOC_CONFIGS bool "Hidden SND_SOC configs needed for GKI" select SND_SOC_GENERIC_DMAENGINE_PCM if (SND_SOC && SND) + select SND_SOC_COMPRESS if (SND_SOC && SND) help Dummy config option used to enable hidden SND_SOC configs. These are normally selected implicitly when a module @@ -89,10 +91,11 @@ config GKI_HIDDEN_SOC_PM_CONFIGS config GKI_HIDDEN_VIDEOBUF2_CONFIGS bool "Hidden v4l2 configs for GKI" + select V4L2_MEM2MEM_DEV if VIDEO_V4L2 select VIDEOBUF2_CORE if VIDEO_V4L2 select VIDEOBUF2_V4L2 if VIDEO_V4L2 - select VIDEOBUF2_MEMOPS if VIDEO_V4L2 select VIDEOBUF2_VMALLOC if VIDEO_V4L2 + select VIDEOBUF2_MEMOPS if VIDEO_V4L2 help Dummy config option used to enable all the hidden v4l2 media module. 
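The constants in include/vdso/time64.h above work together with __iter_div_u64_rem() from include/vdso/math64.h: the vDSO time code normalizes an accumulated nanosecond count into a seconds/nanoseconds pair by repeated subtraction rather than a hardware 64-by-32 divide. The standalone sketch below shows that normalization assuming a plain userspace build; everything except the NSEC_PER_SEC constant is illustrative and not taken from the patch.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

/* Same shape as __iter_div_u64_rem(): repeated subtraction instead of a
 * 64-by-32 divide, which keeps the generated code simple on 32-bit targets. */
static uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
                                 uint64_t *remainder)
{
        uint32_t ret = 0;

        while (dividend >= divisor) {
                dividend -= divisor;
                ret++;
        }
        *remainder = dividend;
        return ret;
}

int main(void)
{
        uint64_t nsec = 3 * (uint64_t)NSEC_PER_SEC + 123456789;
        uint64_t rem;
        uint32_t sec = iter_div_u64_rem(nsec, NSEC_PER_SEC, &rem);

        printf("%u.%09llu\n", sec, (unsigned long long)rem); /* 3.123456789 */
        return 0;
}

The real helper additionally places an empty asm() barrier on the dividend so the compiler cannot rewrite the loop into a modulo instruction; the sketch omits that detail.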
@@ -117,6 +120,22 @@ config GKI_HIDDEN_SOC_BUS_CONFIGS If you are not building a kernel to be used for a variety of SoCs and out-of-tree drivers, say N here. +config GKI_HIDDEN_GPU_CONFIGS + bool "Hidden GPU configuration needed for GKI" + select TRACE_GPU_MEM + help + Dummy config option used to enable the hidden GPU config. + These are normally selected implicitly when a module + that relies on it is configured. + +config GKI_HIDDEN_IRQ_CONFIGS + bool "Hidden IRQ configuration needed for GKI" + select GENERIC_IRQ_CHIP + help + Dummy config option used to enable GENERIC_IRQ_CHIP hidden + config, required by various SoC platforms. This is usually + selected by ARCH_*. + # Atrocities needed for # a) building GKI modules in separate tree, or # b) building drivers that are not modularizable @@ -138,6 +157,8 @@ config GKI_HACKS_TO_FIX select GKI_HIDDEN_VIDEOBUF2_CONFIGS select GKI_HIDDEN_USB_CONFIGS select GKI_HIDDEN_SOC_BUS_CONFIGS + select GKI_HIDDEN_GPU_CONFIGS + select GKI_HIDDEN_IRQ_CONFIGS help Dummy config option used to enable core functionality used by modules that may not be selectable in this config. diff --git a/init/version.c b/init/version.c index 61bcdf3c00a1292fa3febaa93b39ddccb55b6dea..046b830abfe442ef3c10f8fe6af055502b829724 100644 --- a/init/version.c +++ b/init/version.c @@ -45,7 +45,9 @@ EXPORT_SYMBOL_GPL(init_uts_ns); const char linux_banner[] = "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n"; -EXPORT_SYMBOL_GPL(linux_banner); + +const char *linux_banner_ptr = linux_banner; +EXPORT_SYMBOL_GPL(linux_banner_ptr); const char linux_proc_banner[] = "%s version %s" diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 8763ee733e3c8c3379087545c8a80349dda7ee5e..75b10fe0bd7916363f886fcc0a7822a3509a2340 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -76,6 +76,7 @@ struct mqueue_inode_info { struct sigevent notify; struct pid *notify_owner; + u32 notify_self_exec_id; struct user_namespace *notify_user_ns; struct user_struct *user; /* user who created, for accounting */ struct sock *notify_sock; @@ -662,28 +663,44 @@ static void __do_notify(struct mqueue_inode_info *info) * synchronously. */ if (info->notify_owner && info->attr.mq_curmsgs == 1) { - struct siginfo sig_i; switch (info->notify.sigev_notify) { case SIGEV_NONE: break; - case SIGEV_SIGNAL: - /* sends signal */ + case SIGEV_SIGNAL: { + struct siginfo sig_i; + struct task_struct *task; + + /* do_mq_notify() accepts sigev_signo == 0, why?? */ + if (!info->notify.sigev_signo) + break; clear_siginfo(&sig_i); sig_i.si_signo = info->notify.sigev_signo; sig_i.si_errno = 0; sig_i.si_code = SI_MESGQ; sig_i.si_value = info->notify.sigev_value; - /* map current pid/uid into info->owner's namespaces */ rcu_read_lock(); + /* map current pid/uid into info->owner's namespaces */ sig_i.si_pid = task_tgid_nr_ns(current, ns_of_pid(info->notify_owner)); - sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); + sig_i.si_uid = from_kuid_munged(info->notify_user_ns, + current_uid()); + /* + * We can't use kill_pid_info(), this signal should + * bypass check_kill_permission(). It is from kernel + * but si_fromuser() can't know this. + * We do check the self_exec_id, to avoid sending + * signals to programs that don't expect them. 
+ */ + task = pid_task(info->notify_owner, PIDTYPE_TGID); + if (task && task->self_exec_id == + info->notify_self_exec_id) { + do_send_sig_info(info->notify.sigev_signo, + &sig_i, task, PIDTYPE_TGID); + } rcu_read_unlock(); - - kill_pid_info(info->notify.sigev_signo, - &sig_i, info->notify_owner); break; + } case SIGEV_THREAD: set_cookie(info->notify_cookie, NOTIFY_WOKENUP); netlink_sendskb(info->notify_sock, info->notify_cookie); @@ -1273,6 +1290,7 @@ static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification) info->notify.sigev_signo = notification->sigev_signo; info->notify.sigev_value = notification->sigev_value; info->notify.sigev_notify = SIGEV_SIGNAL; + info->notify_self_exec_id = current->self_exec_id; break; } diff --git a/ipc/util.c b/ipc/util.c index 0af05752969f1bc4641a591d717edd1b46a18b5a..b111e792b31257fe76f0c5b66bd4d0ad06bd71a7 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -735,13 +735,13 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos, total++; } + *new_pos = pos + 1; if (total >= ids->in_use) return NULL; for (; pos < IPCMNI; pos++) { ipc = idr_find(&ids->ipcs_idr, pos); if (ipc != NULL) { - *new_pos = pos + 1; rcu_read_lock(); ipc_lock_object(ipc); return ipc; diff --git a/kernel/audit.c b/kernel/audit.c index 1f08c38e604a92d3aa7a65b4960d9350309c88e6..7afec5f43c632814a49aaca770cb5414c6407ae2 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1331,6 +1331,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2: if (!audit_enabled && msg_type != AUDIT_USER_AVC) return 0; + /* exit early if there isn't at least one character to print */ + if (data_len < 2) + return -EINVAL; err = audit_filter(msg_type, AUDIT_FILTER_USER); if (err == 1) { /* match or error */ diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index afee05e3cc1b430335c4e37cbb7bb86c1650df94..bdfb74d85e22b96f200a409cec13aab78eef269a 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -626,7 +626,7 @@ bool __weak arch_bpf_jit_check_func(const struct bpf_prog *prog) { return true; } -EXPORT_SYMBOL(arch_bpf_jit_check_func); +EXPORT_SYMBOL_GPL(arch_bpf_jit_check_func); #endif struct bpf_binary_header * diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 3c18260403dde1df951448c600b6ef9ac61f5635..61fbcae82f0a1c9a2637856fa67265908392f7e6 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -455,7 +455,7 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value, return -EOVERFLOW; /* Make sure CPU is a valid possible cpu */ - if (!cpu_possible(key_cpu)) + if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu)) return -ENODEV; if (qsize == 0) { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e85636fb81b9c2db7d61b0a5755984a71d96fe8b..daf0a9637d73a4b6546b2a8442b20e5ccc445ce2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -188,8 +188,7 @@ struct bpf_call_arg_meta { bool pkt_access; int regno; int access_size; - s64 msize_smax_value; - u64 msize_umax_value; + u64 msize_max_value; }; static DEFINE_MUTEX(bpf_verifier_lock); @@ -2076,8 +2075,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, /* remember the mem_size which may be used later * to refine return values. */ - meta->msize_smax_value = reg->smax_value; - meta->msize_umax_value = reg->umax_value; + meta->msize_max_value = reg->umax_value; /* The register is SCALAR_VALUE; the access check * happens using its boundaries. 
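The cpumap change above adds a range check before cpu_possible() because the possible-CPU mask is a fixed-size bitmap and an oversized key from userspace would otherwise index past it. Below is a hedged, userspace-only sketch of the same bounds-check-before-bitmap-lookup pattern; the helpers and sizes are stand-ins, not the kernel's cpumask API.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long possible_mask[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static bool cpu_is_possible(unsigned int cpu)
{
        /* Reject out-of-range indexes before touching the bitmap. */
        if (cpu >= NR_CPUS)
                return false;
        return possible_mask[cpu / BITS_PER_LONG] & (1UL << (cpu % BITS_PER_LONG));
}

int main(void)
{
        possible_mask[0] = 0x0f;        /* CPUs 0-3 marked possible */

        /* Prints "1 0 0": in range and set, in range but clear, out of range. */
        printf("%d %d %d\n", cpu_is_possible(2), cpu_is_possible(6),
               cpu_is_possible(1000));
        return 0;
}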
@@ -2448,21 +2446,44 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) return 0; } -static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, - int func_id, - struct bpf_call_arg_meta *meta) +static int do_refine_retval_range(struct bpf_verifier_env *env, + struct bpf_reg_state *regs, int ret_type, + int func_id, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *ret_reg = ®s[BPF_REG_0]; + struct bpf_reg_state tmp_reg = *ret_reg; + bool ret; if (ret_type != RET_INTEGER || (func_id != BPF_FUNC_get_stack && func_id != BPF_FUNC_probe_read_str)) - return; + return 0; + + /* Error case where ret is in interval [S32MIN, -1]. */ + ret_reg->smin_value = S32_MIN; + ret_reg->smax_value = -1; + + __reg_deduce_bounds(ret_reg); + __reg_bound_offset(ret_reg); + __update_reg_bounds(ret_reg); + + ret = push_stack(env, env->insn_idx + 1, env->insn_idx, false); + if (!ret) + return -EFAULT; + + *ret_reg = tmp_reg; + + /* Success case where ret is in range [0, msize_max_value]. */ + ret_reg->smin_value = 0; + ret_reg->smax_value = meta->msize_max_value; + ret_reg->umin_value = ret_reg->smin_value; + ret_reg->umax_value = ret_reg->smax_value; - ret_reg->smax_value = meta->msize_smax_value; - ret_reg->umax_value = meta->msize_umax_value; __reg_deduce_bounds(ret_reg); __reg_bound_offset(ret_reg); + __update_reg_bounds(ret_reg); + + return 0; } static int @@ -2617,7 +2638,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn return -EINVAL; } - do_refine_retval_range(regs, fn->ret_type, func_id, &meta); + err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta); + if (err) + return err; err = check_map_func_compatibility(env, meta.map_ptr, func_id); if (err) diff --git a/kernel/cfi.c b/kernel/cfi.c index 9e9992258b481e6ddc4d233aeab8f18e11299d84..5092eaa403c79334041ac85a62e02537b58dbebc 100644 --- a/kernel/cfi.c +++ b/kernel/cfi.c @@ -215,14 +215,14 @@ void cfi_module_add(struct module *mod, unsigned long min_addr, { update_shadow(mod, min_addr, max_addr, add_module_to_shadow); } -EXPORT_SYMBOL(cfi_module_add); +EXPORT_SYMBOL_GPL(cfi_module_add); void cfi_module_remove(struct module *mod, unsigned long min_addr, unsigned long max_addr) { update_shadow(mod, min_addr, max_addr, remove_module_from_shadow); } -EXPORT_SYMBOL(cfi_module_remove); +EXPORT_SYMBOL_GPL(cfi_module_remove); static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s, unsigned long ptr) @@ -291,14 +291,14 @@ void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag) else /* Don't allow unchecked modules */ handle_cfi_failure(ptr); } -EXPORT_SYMBOL(cfi_slowpath_handler); +EXPORT_SYMBOL_GPL(cfi_slowpath_handler); #endif /* CONFIG_MODULES */ void cfi_failure_handler(void *data, void *ptr, void *vtable) { handle_cfi_failure(ptr); } -EXPORT_SYMBOL(cfi_failure_handler); +EXPORT_SYMBOL_GPL(cfi_failure_handler); void __cfi_check_fail(void *data, void *ptr) { diff --git a/kernel/cpu.c b/kernel/cpu.c index a94bf1e1bc058899bc9d195a3403cdef48e6cba3..a20284da61916362fcbf72b39b917e5e6a0c57d0 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -2077,10 +2077,8 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) */ cpuhp_offline_cpu_device(cpu); } - if (!ret) { + if (!ret) cpu_smt_control = ctrlval; - arch_smt_update(); - } cpu_maps_update_done(); return ret; } @@ -2091,7 +2089,6 @@ int cpuhp_smt_enable(void) cpu_maps_update_begin(); cpu_smt_control = CPU_SMT_ENABLED; - arch_smt_update(); for_each_present_cpu(cpu) { /* Skip online CPUs and CPUs 
on offline nodes */ if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c index 597d40893862696ed76457c7071c8d5fd074f612..83e48618b6ca138933398ee41af4e8d2878aaccd 100644 --- a/kernel/dma/coherent.c +++ b/kernel/dma/coherent.c @@ -29,14 +29,21 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de return NULL; } -static inline dma_addr_t dma_get_device_base(struct device *dev, - struct dma_coherent_mem * mem) +dma_addr_t dma_get_device_base(struct device *dev, + struct dma_coherent_mem *mem) { if (mem->use_dev_dma_pfn_offset) return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT; else return mem->device_base; } +EXPORT_SYMBOL_GPL(dma_get_device_base); + +unsigned long dma_get_size(struct dma_coherent_mem *mem) +{ + return mem->size << PAGE_SHIFT; +} +EXPORT_SYMBOL_GPL(dma_get_size); static int dma_init_coherent_memory( phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags, diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index d2a92ddaac4d14c8683433856672fddbace7a4c9..44f47d87dfd08aac2bb808f904154bf9879396fa 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -291,11 +291,12 @@ void *dma_common_contiguous_remap(struct page *page, size_t size, unsigned long vm_flags, pgprot_t prot, const void *caller) { - int i; + unsigned long i; struct page **pages; struct vm_struct *area; - pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL); + pages = kvmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL); + if (!pages) return NULL; @@ -304,7 +305,7 @@ void *dma_common_contiguous_remap(struct page *page, size_t size, area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller); - kfree(pages); + kvfree(pages); if (!area) return NULL; @@ -314,12 +315,14 @@ void *dma_common_contiguous_remap(struct page *page, size_t size, /* * unmaps a range previously mapped by dma_common_*_remap */ -void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) +void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags, + bool no_warn) { struct vm_struct *area = find_vm_area(cpu_addr); if (!area || (area->flags & vm_flags) != vm_flags) { - WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); + WARN(!no_warn, "trying to free invalid coherent area: %p\n", + cpu_addr); return; } diff --git a/kernel/events/core.c b/kernel/events/core.c index 4b01efe468981758a5a60b92c2098d180b28d739..08c8d20549ff4db802ad8d0d7ec46939b3f19b41 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6427,9 +6427,12 @@ static u64 perf_virt_to_phys(u64 virt) * Try IRQ-safe __get_user_pages_fast first. * If failed, leave phys_addr as 0. 
*/ - if ((current->mm != NULL) && - (__get_user_pages_fast(virt, 1, 0, &p) == 1)) - phys_addr = page_to_phys(p) + virt % PAGE_SIZE; + if (current->mm != NULL) { + pagefault_disable(); + if (__get_user_pages_fast(virt, 1, 0, &p) == 1) + phys_addr = page_to_phys(p) + virt % PAGE_SIZE; + pagefault_enable(); + } if (p) put_page(p); @@ -6936,10 +6939,17 @@ static void perf_event_task_output(struct perf_event *event, goto out; task_event->event_id.pid = perf_event_pid(event, task); - task_event->event_id.ppid = perf_event_pid(event, current); - task_event->event_id.tid = perf_event_tid(event, task); - task_event->event_id.ptid = perf_event_tid(event, current); + + if (task_event->event_id.header.type == PERF_RECORD_EXIT) { + task_event->event_id.ppid = perf_event_pid(event, + task->real_parent); + task_event->event_id.ptid = perf_event_pid(event, + task->real_parent); + } else { /* PERF_RECORD_FORK */ + task_event->event_id.ppid = perf_event_pid(event, current); + task_event->event_id.ptid = perf_event_tid(event, current); + } task_event->event_id.time = perf_event_clock(event); diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c index 6e40ff6be083dab77ad46edc01dbd95e4c0a854f..291e0797125b694334db84869b5db3676531d335 100644 --- a/kernel/gcov/fs.c +++ b/kernel/gcov/fs.c @@ -109,9 +109,9 @@ static void *gcov_seq_next(struct seq_file *seq, void *data, loff_t *pos) { struct gcov_iterator *iter = data; + (*pos)++; if (gcov_iter_next(iter)) return NULL; - (*pos)++; return iter; } diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh index 9a34e1d9bd7f97eb82e6ceb0042c7f5502742041..9ff449888d9cda491a39b0ffeba27c31d47a72b3 100755 --- a/kernel/gen_kheaders.sh +++ b/kernel/gen_kheaders.sh @@ -4,24 +4,12 @@ # This script generates an archive consisting of kernel headers # for CONFIG_IKHEADERS. set -e -spath="$(dirname "$(readlink -f "$0")")" -kroot="$spath/.." +sfile="$(readlink -f "$0")" outdir="$(pwd)" tarfile=$1 cpio_dir=$outdir/$tarfile.tmp -# Script filename relative to the kernel source root -# We add it to the archive because it is small and any changes -# to this script will also cause a rebuild of the archive. -sfile="$(realpath --relative-to $kroot "$(readlink -f "$0")")" - -src_file_list=" -include/ -arch/$SRCARCH/include/ -$sfile -" - -obj_file_list=" +dir_list=" include/ arch/$SRCARCH/include/ " @@ -33,33 +21,29 @@ arch/$SRCARCH/include/ # Uncomment it for debugging. # if [ ! -f /tmp/iter ]; then iter=1; echo 1 > /tmp/iter; # else iter=$(($(cat /tmp/iter) + 1)); echo $iter > /tmp/iter; fi -# find $src_file_list -type f | xargs ls -lR > /tmp/src-ls-$iter -# find $obj_file_list -type f | xargs ls -lR > /tmp/obj-ls-$iter +# find $src_file_list -name "*.h" | xargs ls -l > /tmp/src-ls-$iter +# find $obj_file_list -name "*.h" | xargs ls -l > /tmp/obj-ls-$iter # include/generated/compile.h is ignored because it is touched even when none # of the source files changed. This causes pointless regeneration, so let us # ignore them for md5 calculation. 
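The gcov_seq_next() change and the earlier sysvipc_find_ipc() change fix the same seq_file contract: the ->next() callback must advance *pos unconditionally, including on the call that ends the iteration, or the final read can keep seeing the same position. The sketch below models that contract with a plain array iterator; it is an illustration of the rule, not the kernel's seq_file code.

#include <stddef.h>
#include <stdio.h>

static const char *items[] = { "alpha", "beta", "gamma" };
#define NITEMS (sizeof(items) / sizeof(items[0]))

/* Mimics a seq_file ->next(): advance the position first, then decide
 * whether another record exists. The position moves even when NULL is
 * returned, so the caller always sees forward progress. */
static const char *iter_next(long long *pos)
{
        (*pos)++;
        if ((size_t)*pos >= NITEMS)
                return NULL;
        return items[*pos];
}

int main(void)
{
        long long pos = 0;
        const char *rec = items[pos];

        while (rec) {
                printf("%lld: %s\n", pos, rec);
                rec = iter_next(&pos);
        }
        /* pos ends one past the last record, as the seq_file core expects. */
        return 0;
}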
-pushd $kroot > /dev/null -src_files_md5="$(find $src_file_list -type f | +pushd $srctree > /dev/null +src_files_md5="$(find $dir_list -name "*.h" | grep -v "include/generated/compile.h" | grep -v "include/generated/autoconf.h" | - grep -v "include/config/auto.conf" | - grep -v "include/config/auto.conf.cmd" | - grep -v "include/config/tristate.conf" | - xargs ls -lR | md5sum | cut -d ' ' -f1)" + xargs ls -l | md5sum | cut -d ' ' -f1)" popd > /dev/null -obj_files_md5="$(find $obj_file_list -type f | +obj_files_md5="$(find $dir_list -name "*.h" | grep -v "include/generated/compile.h" | grep -v "include/generated/autoconf.h" | - grep -v "include/config/auto.conf" | - grep -v "include/config/auto.conf.cmd" | - grep -v "include/config/tristate.conf" | - xargs ls -lR | md5sum | cut -d ' ' -f1)" - + xargs ls -l | md5sum | cut -d ' ' -f1)" +# Any changes to this script will also cause a rebuild of the archive. +this_file_md5="$(ls -l $sfile | md5sum | cut -d ' ' -f1)" if [ -f $tarfile ]; then tarfile_md5="$(md5sum $tarfile | cut -d ' ' -f1)"; fi if [ -f kernel/kheaders.md5 ] && [ "$(cat kernel/kheaders.md5|head -1)" == "$src_files_md5" ] && [ "$(cat kernel/kheaders.md5|head -2|tail -1)" == "$obj_files_md5" ] && + [ "$(cat kernel/kheaders.md5|head -3|tail -1)" == "$this_file_md5" ] && [ "$(cat kernel/kheaders.md5|tail -1)" == "$tarfile_md5" ]; then exit fi @@ -71,16 +55,16 @@ fi rm -rf $cpio_dir mkdir $cpio_dir -pushd $kroot > /dev/null -for f in $src_file_list; - do find "$f" ! -name "*.cmd" ! -name ".*"; +pushd $srctree > /dev/null +for f in $dir_list; + do find "$f" -name "*.h"; done | cpio --quiet -pd $cpio_dir popd > /dev/null # The second CPIO can complain if files already exist which can # happen with out of tree builds. Just silence CPIO for now. -for f in $obj_file_list; - do find "$f" ! -name "*.cmd" ! -name ".*"; +for f in $dir_list; + do find "$f" -name "*.h"; done | cpio --quiet -pd $cpio_dir >/dev/null 2>&1 # Remove comments except SDPX lines @@ -91,6 +75,7 @@ tar -Jcf $tarfile -C $cpio_dir/ . 
> /dev/null echo "$src_files_md5" > kernel/kheaders.md5 echo "$obj_files_md5" >> kernel/kheaders.md5 +echo "$this_file_md5" >> kernel/kheaders.md5 echo "$(md5sum $tarfile | cut -d ' ' -f1)" >> kernel/kheaders.md5 rm -rf $cpio_dir diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 065b1ec313d963ecd10e0aa870be6380f46f8f7c..585aa59f06f2eb78c4e94f61f5b00d2604b7f80c 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -1257,6 +1257,11 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs, void *arg) { + if (!domain->ops->alloc) { + pr_debug("domain->ops->alloc() is NULL\n"); + return -ENOSYS; + } + return domain->ops->alloc(domain, irq_base, nr_irqs, arg); } @@ -1294,11 +1299,6 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, return -EINVAL; } - if (!domain->ops->alloc) { - pr_debug("domain->ops->alloc() is NULL\n"); - return -ENOSYS; - } - if (realloc && irq_base >= 0) { virq = irq_base; } else { diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 1e272f6a01e73e0359a874ef580a623c1af59b3d..8a1758b094b7007a47760a3089396dfeb2ae3667 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -1260,9 +1260,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class) this.class = class; raw_local_irq_save(flags); + current->lockdep_recursion = 1; arch_spin_lock(&lockdep_lock); ret = __lockdep_count_forward_deps(&this); arch_spin_unlock(&lockdep_lock); + current->lockdep_recursion = 0; raw_local_irq_restore(flags); return ret; @@ -1287,9 +1289,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class) this.class = class; raw_local_irq_save(flags); + current->lockdep_recursion = 1; arch_spin_lock(&lockdep_lock); ret = __lockdep_count_backward_deps(&this); arch_spin_unlock(&lockdep_lock); + current->lockdep_recursion = 0; raw_local_irq_restore(flags); return ret; diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 7d0b0ed744042d827886554951a24329879b93f0..95395ef5922ac9262211921507a00a452c332aaf 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -710,10 +710,10 @@ static void __torture_print_stats(char *page, if (statp[i].n_lock_fail) fail = true; sum += statp[i].n_lock_acquired; - if (max < statp[i].n_lock_fail) - max = statp[i].n_lock_fail; - if (min > statp[i].n_lock_fail) - min = statp[i].n_lock_fail; + if (max < statp[i].n_lock_acquired) + max = statp[i].n_lock_acquired; + if (min > statp[i].n_lock_acquired) + min = statp[i].n_lock_acquired; } page += sprintf(page, "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n", diff --git a/kernel/padata.c b/kernel/padata.c index cfab62923c452f750e68a21b286592093f5591ef..c280cb153915fbf1bf117550a743486309d70881 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -671,8 +671,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, struct cpumask *serial_mask, *parallel_mask; int err = -EINVAL; - mutex_lock(&pinst->lock); get_online_cpus(); + mutex_lock(&pinst->lock); switch (cpumask_type) { case PADATA_CPU_PARALLEL: @@ -690,8 +690,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask); out: - put_online_cpus(); mutex_unlock(&pinst->lock); + put_online_cpus(); return err; } diff --git a/kernel/panic.c b/kernel/panic.c index 8138a676fb7d1b4f59f82c18cebc8cecea3f3d59..6b334699ddbdcf037923ec685f6da475ec2bf521 100644 --- a/kernel/panic.c 
+++ b/kernel/panic.c @@ -47,9 +47,11 @@ int panic_timeout = CONFIG_PANIC_TIMEOUT; EXPORT_SYMBOL_GPL(panic_timeout); ATOMIC_NOTIFIER_HEAD(panic_notifier_list); - EXPORT_SYMBOL(panic_notifier_list); +void (*vendor_panic_cb)(u64 sp); +EXPORT_SYMBOL_GPL(vendor_panic_cb); + static long no_blink(int state) { return 0; @@ -177,6 +179,8 @@ void panic(const char *fmt, ...) va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); + if (vendor_panic_cb) + vendor_panic_cb(0); pr_emerg("Kernel panic - not syncing: %s\n", buf); #ifdef CONFIG_DEBUG_BUGVERBOSE /* @@ -276,6 +280,8 @@ void panic(const char *fmt, ...) * shutting down. But if there is a chance of * rebooting the system it will be rebooted. */ + if (panic_reboot_mode != REBOOT_UNDEFINED) + reboot_mode = panic_reboot_mode; emergency_restart(); } #ifdef __sparc__ diff --git a/kernel/params.c b/kernel/params.c index ce89f757e6da04ab0f8ee97c66e4dbce1033dc94..bd8d46c279f0bf7ae4337b5d5ed1fdd331dcc990 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -924,6 +924,7 @@ static const struct kset_uevent_ops module_uevent_ops = { }; struct kset *module_kset; +EXPORT_SYMBOL_GPL(module_kset); int module_sysfs_initialized; static void module_kobj_release(struct kobject *kobj) @@ -936,6 +937,7 @@ struct kobj_type module_ktype = { .release = module_kobj_release, .sysfs_ops = &module_sysfs_ops, }; +EXPORT_SYMBOL_GPL(module_ktype); /* * param_sysfs_init - wrapper for built-in params support diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index f5ce9f7ec132d3aab21103ec60c9e74ccce60aff..537a2a3c1dea267b1137008688dc1c0573f9d9a6 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -901,6 +901,13 @@ static int software_resume(void) error = freeze_processes(); if (error) goto Close_Finish; + + error = freeze_kernel_threads(); + if (error) { + thaw_processes(); + goto Close_Finish; + } + error = load_image_and_restore(); thaw_processes(); Finish: diff --git a/kernel/power/main.c b/kernel/power/main.c index 6bcb47d366af8227783aab160162a744859e1196..1ee81334d689d3078af1ff7c291833e5a51464b1 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -535,6 +535,7 @@ static inline void pm_print_times_init(void) {} #endif /* CONFIG_PM_SLEEP_DEBUG */ struct kobject *power_kobj; +EXPORT_SYMBOL_GPL(power_kobj); /** * state - control system sleep states. 
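The panic() change above introduces a single optional hook, vendor_panic_cb, which a vendor module can install so that it runs before the panic banner is printed; panic() passes 0 for the stack pointer argument here. The userspace-shaped sketch below shows the callback-pointer pattern in isolation; the handler and the simulated panic path are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* One optional hook, NULL until a handler registers itself. */
static void (*vendor_panic_cb)(uint64_t sp);

static void my_vendor_handler(uint64_t sp)
{
        printf("vendor handler: sp=%llu\n", (unsigned long long)sp);
}

static void panic_path(const char *msg)
{
        /* Invoke the vendor hook, if any, before the usual banner. */
        if (vendor_panic_cb)
                vendor_panic_cb(0);
        printf("Kernel panic - not syncing: %s\n", msg);
}

int main(void)
{
        panic_path("no handler yet");           /* hook unset, skipped */
        vendor_panic_cb = my_vendor_handler;
        panic_path("with handler");             /* hook runs first */
        return 0;
}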
diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 035fce171345b014f34c12fa4b74554b74c0c317..789dd20b66696be0da0c58c0647669bd095d9323 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -574,9 +574,13 @@ void pm_qos_add_request(struct pm_qos_request *req, #ifdef CONFIG_SMP case PM_QOS_REQ_AFFINE_IRQ: if (irq_can_set_affinity(req->irq)) { - int ret = 0; struct irq_desc *desc = irq_to_desc(req->irq); - struct cpumask *mask = desc->irq_data.common->affinity; + struct cpumask *mask; + + if (!desc) + return; + + mask = desc->irq_data.common->affinity; /* Get the current affinity */ cpumask_copy(&req->cpus_affine, mask); @@ -584,13 +588,6 @@ void pm_qos_add_request(struct pm_qos_request *req, req->irq_notify.notify = pm_qos_irq_notify; req->irq_notify.release = pm_qos_irq_release; - ret = irq_set_affinity_notifier(req->irq, - &req->irq_notify); - if (ret) { - WARN(1, "IRQ affinity notify set failed\n"); - req->type = PM_QOS_REQ_ALL_CORES; - cpumask_setall(&req->cpus_affine); - } } else { req->type = PM_QOS_REQ_ALL_CORES; cpumask_setall(&req->cpus_affine); @@ -612,6 +609,24 @@ void pm_qos_add_request(struct pm_qos_request *req, trace_pm_qos_add_request(pm_qos_class, value); pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints, &req->node, PM_QOS_ADD_REQ, value); + +#ifdef CONFIG_SMP + if (req->type == PM_QOS_REQ_AFFINE_IRQ && + irq_can_set_affinity(req->irq)) { + int ret = 0; + + ret = irq_set_affinity_notifier(req->irq, + &req->irq_notify); + if (ret) { + WARN(1, "IRQ affinity notify set failed\n"); + req->type = PM_QOS_REQ_ALL_CORES; + cpumask_setall(&req->cpus_affine); + pm_qos_update_target( + pm_qos_array[pm_qos_class]->constraints, + &req->node, PM_QOS_UPDATE_REQ, value); + } + } +#endif } EXPORT_SYMBOL_GPL(pm_qos_add_request); @@ -669,6 +684,7 @@ void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value, schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us)); } +EXPORT_SYMBOL_GPL(pm_qos_update_request_timeout); /** * pm_qos_remove_request - modifies an existing qos request diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index a85938dba6a511b18aad56c5146955d5672203b4..365d1e81fed7c6ea96c29c8eecc03927326c006e 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -76,6 +76,7 @@ void s2idle_set_ops(const struct platform_s2idle_ops *ops) s2idle_ops = ops; unlock_system_sleep(); } +EXPORT_SYMBOL_GPL(s2idle_set_ops); static void s2idle_begin(void) { diff --git a/kernel/power/wakeup_reason.c b/kernel/power/wakeup_reason.c index 503a71fc49fc75379a7853617431b82833be5ca6..3c118c0446330f4b43a93811da79828edf6e2288 100644 --- a/kernel/power/wakeup_reason.c +++ b/kernel/power/wakeup_reason.c @@ -102,7 +102,7 @@ static void delete_list(struct list_head *head) static bool add_sibling_node_sorted(struct list_head *head, int irq) { - struct wakeup_irq_node *n; + struct wakeup_irq_node *n = NULL; struct list_head *predecessor = head; if (unlikely(WARN_ON(!head))) @@ -196,7 +196,8 @@ void log_threaded_irq_wakeup_reason(int irq, int parent_irq) spin_unlock_irqrestore(&wakeup_reason_lock, flags); } -void __log_abort_or_abnormal_wake(bool abort, const char *fmt, va_list args) +static void __log_abort_or_abnormal_wake(bool abort, const char *fmt, + va_list args) { unsigned long flags; @@ -330,8 +331,10 @@ static ssize_t last_suspend_time_show(struct kobject *kobj, /* Export suspend_resume_time and sleep_time in pair here. 
*/ return sprintf(buf, "%llu.%09lu %llu.%09lu\n", - suspend_resume_time.tv_sec, suspend_resume_time.tv_nsec, - sleep_time.tv_sec, sleep_time.tv_nsec); + (unsigned long long)suspend_resume_time.tv_sec, + suspend_resume_time.tv_nsec, + (unsigned long long)sleep_time.tv_sec, + sleep_time.tv_nsec); } static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason); @@ -375,7 +378,7 @@ static struct notifier_block wakeup_reason_pm_notifier_block = { .notifier_call = wakeup_reason_pm_event, }; -int __init wakeup_reason_init(void) +static int __init wakeup_reason_init(void) { if (register_pm_notifier(&wakeup_reason_pm_notifier_block)) { pr_warn("[%s] failed to register PM notifier\n", __func__); diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 7a2fdc097c8cb4e6ca1ac895fdc0b2a519f65fc9..e44c7a4e89c7e35219b2d29339508a7b935436f5 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -448,12 +448,14 @@ char *log_buf_addr_get(void) { return log_buf; } +EXPORT_SYMBOL_GPL(log_buf_addr_get); /* Return log buffer size */ u32 log_buf_len_get(void) { return log_buf_len; } +EXPORT_SYMBOL_GPL(log_buf_len_get); /* human readable text of the record */ static char *log_text(const struct printk_log *msg) diff --git a/kernel/reboot.c b/kernel/reboot.c index 8fb44dec9ad75799e9269504ddea7083db654f9b..4fef5894b9d2d47b98b938997b1882904b9e8351 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -31,6 +31,7 @@ EXPORT_SYMBOL(cad_pid); #define DEFAULT_REBOOT_MODE #endif enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE; +enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED; /* * This variable is used privately to keep track of whether or not @@ -518,6 +519,8 @@ EXPORT_SYMBOL_GPL(orderly_reboot); static int __init reboot_setup(char *str) { for (;;) { + enum reboot_mode *mode; + /* * Having anything passed on the command line via * reboot= will cause us to disable DMI checking @@ -525,17 +528,24 @@ static int __init reboot_setup(char *str) */ reboot_default = 0; + if (!strncmp(str, "panic_", 6)) { + mode = &panic_reboot_mode; + str += 6; + } else { + mode = &reboot_mode; + } + switch (*str) { case 'w': - reboot_mode = REBOOT_WARM; + *mode = REBOOT_WARM; break; case 'c': - reboot_mode = REBOOT_COLD; + *mode = REBOOT_COLD; break; case 'h': - reboot_mode = REBOOT_HARD; + *mode = REBOOT_HARD; break; case 's': @@ -552,11 +562,11 @@ static int __init reboot_setup(char *str) if (rc) return rc; } else - reboot_mode = REBOOT_SOFT; + *mode = REBOOT_SOFT; break; } case 'g': - reboot_mode = REBOOT_GPIO; + *mode = REBOOT_GPIO; break; case 'b': diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 832b7631668dba4914be30ae79761473f0a63510..872fdfa8cf92869487771e9676e927c522676073 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1192,17 +1192,8 @@ static void uclamp_fork(struct task_struct *p) return; for_each_clamp_id(clamp_id) { - unsigned int clamp_value = uclamp_none(clamp_id); - - /* By default, RT tasks always get 100% boost */ - if (sched_feat(SUGOV_RT_MAX_FREQ) && - unlikely(rt_task(p) && - clamp_id == UCLAMP_MIN)) { - - clamp_value = uclamp_none(UCLAMP_MAX); - } - - uclamp_se_set(&p->uclamp_req[clamp_id], clamp_value, false); + uclamp_se_set(&p->uclamp_req[clamp_id], + uclamp_none(clamp_id), false); } } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 09d220d6be0570869265e385a7e282b99b243694..9e0cc7385eb87efe7220fc91dd339a8946c61979 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -127,7 +127,13 @@ static inline void 
cpu_load_update_active(struct rq *this_rq) { } #ifdef CONFIG_64BIT # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) -# define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT) +# define scale_load_down(w) \ +({ \ + unsigned long __w = (w); \ + if (__w) \ + __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \ + __w; \ +}) #else # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) # define scale_load(w) (w) @@ -794,6 +800,12 @@ struct root_domain { * CPUs of the rd. Protected by RCU. */ struct perf_domain *pd; + + /* Vendor fields. */ + /* First cpu with maximum and minimum original capacity */ + int max_cap_orig_cpu, min_cap_orig_cpu; + /* First cpu with mid capacity */ + int mid_cap_orig_cpu; }; extern struct root_domain def_root_domain; diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 5dd47f1103d18bd769cace68b753088574e02f27..0de9dcfde49505d55fb2384a7ab5fc314a512308 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -370,7 +370,8 @@ void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_en } EXPORT_SYMBOL(finish_wait); -int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key) +int __sched autoremove_wake_function(struct wait_queue_entry *wq_entry, + unsigned int mode, int sync, void *key) { int ret = default_wake_function(wq_entry, mode, sync, key); @@ -406,7 +407,8 @@ static inline bool is_kthread_should_stop(void) * } smp_mb(); // C * remove_wait_queue(&wq_head, &wait); wq_entry->flags |= WQ_FLAG_WOKEN; */ -long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout) +long __sched wait_woken(struct wait_queue_entry *wq_entry, unsigned int mode, + long timeout) { /* * The below executes an smp_mb(), which matches with the full barrier @@ -431,7 +433,8 @@ long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout) } EXPORT_SYMBOL(wait_woken); -int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key) +int __sched woken_wake_function(struct wait_queue_entry *wq_entry, + unsigned int mode, int sync, void *key) { /* Pairs with the smp_store_mb() in wait_woken(). */ smp_mb(); /* C */ diff --git a/kernel/signal.c b/kernel/signal.c index b3823bd2ff01983bfe1364480f0627bb8efaa85f..fbcf1f3780dad2ad6520fed296b7cac36e7686bc 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1850,7 +1850,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig) * This is only possible if parent == real_parent. * Check if it has changed security domain. 
*/ - if (tsk->parent_exec_id != tsk->parent->self_exec_id) + if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id)) sig = SIGCHLD; } diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 62949e56064a9e271763466c0c6d4640581f3bf7..521c38e9ac14f57a4cf9c04715c060a67ea9ed4b 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1494,14 +1494,6 @@ static struct ctl_table vm_table[] = { .proc_handler = min_free_kbytes_sysctl_handler, .extra1 = &zero, }, - { - .procname = "watermark_boost_factor", - .data = &watermark_boost_factor, - .maxlen = sizeof(watermark_boost_factor), - .mode = 0644, - .proc_handler = watermark_boost_factor_sysctl_handler, - .extra1 = &zero, - }, { .procname = "watermark_scale_factor", .data = &watermark_scale_factor, diff --git a/kernel/time/Makefile b/kernel/time/Makefile index f1e46f338a9c97ab35d8edbee63299374b4e0edf..1867044800bb43239856c57ce728aa8febedbd8a 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile @@ -16,5 +16,6 @@ ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y) endif obj-$(CONFIG_GENERIC_SCHED_CLOCK) += sched_clock.o obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o tick-sched.o +obj-$(CONFIG_HAVE_GENERIC_VDSO) += vsyscall.o obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o obj-$(CONFIG_TEST_UDELAY) += test_udelay.o diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index e70e9a789865788a0997eb01712fa8338e2b2fee..95c62850d9a41b5dbe99291812755efbc46a0bf5 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1067,6 +1067,7 @@ ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) return ktime_sub(next_event, now); } +EXPORT_SYMBOL_GPL(tick_nohz_get_sleep_length); /** * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c new file mode 100644 index 0000000000000000000000000000000000000000..a808931808261eeeabed43c9b6df69917fbdc204 --- /dev/null +++ b/kernel/time/vsyscall.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 ARM Ltd. + * + * Generic implementation of update_vsyscall and update_vsyscall_tz. + * + * Based on the x86 specific implementation. 
+ */ + +#include +#include +#include +#include +#include + +static inline void update_vdso_data(struct vdso_data *vdata, + struct timekeeper *tk) +{ + struct vdso_timestamp *vdso_ts; + u64 nsec; + + vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last; + vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask; + vdata[CS_HRES_COARSE].mult = tk->tkr_mono.mult; + vdata[CS_HRES_COARSE].shift = tk->tkr_mono.shift; + vdata[CS_RAW].cycle_last = tk->tkr_raw.cycle_last; + vdata[CS_RAW].mask = tk->tkr_raw.mask; + vdata[CS_RAW].mult = tk->tkr_raw.mult; + vdata[CS_RAW].shift = tk->tkr_raw.shift; + + /* CLOCK_REALTIME */ + vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME]; + vdso_ts->sec = tk->xtime_sec; + vdso_ts->nsec = tk->tkr_mono.xtime_nsec; + + /* CLOCK_MONOTONIC */ + vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC]; + vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; + + nsec = tk->tkr_mono.xtime_nsec; + nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift); + while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { + nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); + vdso_ts->sec++; + } + vdso_ts->nsec = nsec; + + /* CLOCK_MONOTONIC_RAW */ + vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW]; + vdso_ts->sec = tk->raw_sec; + vdso_ts->nsec = tk->tkr_raw.xtime_nsec; + + /* CLOCK_BOOTTIME */ + vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME]; + vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; + nsec = tk->tkr_mono.xtime_nsec; + nsec += ((u64)(tk->wall_to_monotonic.tv_nsec + + ktime_to_ns(tk->offs_boot)) << tk->tkr_mono.shift); + while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { + nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); + vdso_ts->sec++; + } + vdso_ts->nsec = nsec; + + /* CLOCK_TAI */ + vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI]; + vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset; + vdso_ts->nsec = tk->tkr_mono.xtime_nsec; + + /* + * Read without the seqlock held by clock_getres(). + * Note: No need to have a second copy. + */ + WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution); +} + +void update_vsyscall(struct timekeeper *tk) +{ + struct vdso_data *vdata = __arch_get_k_vdso_data(); + struct vdso_timestamp *vdso_ts; + u64 nsec; + + if (__arch_update_vdso_data()) { + /* + * Some architectures might want to skip the update of the + * data page. 
+ */ + return; + } + + /* copy vsyscall data */ + vdso_write_begin(vdata); + + vdata[CS_HRES_COARSE].clock_mode = __arch_get_clock_mode(tk); + vdata[CS_RAW].clock_mode = __arch_get_clock_mode(tk); + + /* CLOCK_REALTIME_COARSE */ + vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE]; + vdso_ts->sec = tk->xtime_sec; + vdso_ts->nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; + + /* CLOCK_MONOTONIC_COARSE */ + vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE]; + vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; + nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; + nsec = nsec + tk->wall_to_monotonic.tv_nsec; + while (nsec >= NSEC_PER_SEC) { + nsec = nsec - NSEC_PER_SEC; + vdso_ts->sec++; + } + vdso_ts->nsec = nsec; + + if (__arch_use_vsyscall(vdata)) + update_vdso_data(vdata, tk); + + __arch_update_vsyscall(vdata, tk); + + vdso_write_end(vdata); + + __arch_sync_vdso_data(vdata); +} + +void update_vsyscall_tz(void) +{ + struct vdso_data *vdata = __arch_get_k_vdso_data(); + + if (__arch_use_vsyscall(vdata)) { + vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest; + vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime; + } + + __arch_sync_vdso_data(vdata); +} diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 2868d85f1fb1d3286984c4727f0519957ac069a9..6cea8bbca03cb03bfa483e39188663d008256da2 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -336,6 +336,7 @@ static void put_probe_ref(void) static void blk_trace_cleanup(struct blk_trace *bt) { + synchronize_rcu(); blk_trace_free(bt); put_probe_ref(); } @@ -636,8 +637,10 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name, static int __blk_trace_startstop(struct request_queue *q, int start) { int ret; - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); if (bt == NULL) return -EINVAL; @@ -746,8 +749,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) void blk_trace_shutdown(struct request_queue *q) { mutex_lock(&q->blk_trace_mutex); - - if (q->blk_trace) { + if (rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex))) { __blk_trace_startstop(q, 0); __blk_trace_remove(q); } @@ -759,8 +762,10 @@ void blk_trace_shutdown(struct request_queue *q) static union kernfs_node_id * blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + /* We don't use the 'bt' value here except as an optimization... 
*/ + bt = rcu_dereference_protected(q->blk_trace, 1); if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) return NULL; @@ -805,10 +810,14 @@ static void blk_add_trace_rq(struct request *rq, int error, unsigned int nr_bytes, u32 what, union kernfs_node_id *cgid) { - struct blk_trace *bt = rq->q->blk_trace; + struct blk_trace *bt; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(rq->q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } if (blk_rq_is_passthrough(rq)) what |= BLK_TC_ACT(BLK_TC_PC); @@ -817,6 +826,7 @@ static void blk_add_trace_rq(struct request *rq, int error, __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq), rq->cmd_flags, what, error, 0, NULL, cgid); + rcu_read_unlock(); } static void blk_add_trace_rq_insert(void *ignore, @@ -862,14 +872,19 @@ static void blk_add_trace_rq_complete(void *ignore, struct request *rq, static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, u32 what, int error) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, what, error, 0, NULL, blk_trace_bio_get_cgid(q, bio)); + rcu_read_unlock(); } static void blk_add_trace_bio_bounce(void *ignore, @@ -914,11 +929,14 @@ static void blk_add_trace_getrq(void *ignore, if (bio) blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0); else { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0, NULL, NULL); + rcu_read_unlock(); } } @@ -930,27 +948,35 @@ static void blk_add_trace_sleeprq(void *ignore, if (bio) blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0); else { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ, 0, 0, NULL, NULL); + rcu_read_unlock(); } } static void blk_add_trace_plug(void *ignore, struct request_queue *q) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL); + rcu_read_unlock(); } static void blk_add_trace_unplug(void *ignore, struct request_queue *q, unsigned int depth, bool explicit) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) { __be64 rpdu = cpu_to_be64(depth); u32 what; @@ -962,14 +988,17 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q, __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL); } + rcu_read_unlock(); } static void blk_add_trace_split(void *ignore, struct request_queue *q, struct bio *bio, unsigned int pdu) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); if (bt) { __be64 rpdu = cpu_to_be64(pdu); @@ -978,6 +1007,7 @@ static void blk_add_trace_split(void *ignore, BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu), &rpdu, blk_trace_bio_get_cgid(q, bio)); } + rcu_read_unlock(); } /** @@ -997,11 +1027,15 @@ static void blk_add_trace_bio_remap(void *ignore, struct request_queue *q, struct bio *bio, dev_t dev, sector_t from) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; struct blk_io_trace_remap r; - if 
(likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } r.device_from = cpu_to_be32(dev); r.device_to = cpu_to_be32(bio_dev(bio)); @@ -1010,6 +1044,7 @@ static void blk_add_trace_bio_remap(void *ignore, __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status, sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); + rcu_read_unlock(); } /** @@ -1030,11 +1065,15 @@ static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev, sector_t from) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; struct blk_io_trace_remap r; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } r.device_from = cpu_to_be32(dev); r.device_to = cpu_to_be32(disk_devt(rq->rq_disk)); @@ -1043,6 +1082,7 @@ static void blk_add_trace_rq_remap(void *ignore, __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rq_data_dir(rq), 0, BLK_TA_REMAP, 0, sizeof(r), &r, blk_trace_request_get_cgid(q, rq)); + rcu_read_unlock(); } /** @@ -1060,14 +1100,19 @@ void blk_add_driver_data(struct request_queue *q, struct request *rq, void *data, size_t len) { - struct blk_trace *bt = q->blk_trace; + struct blk_trace *bt; - if (likely(!bt)) + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + if (likely(!bt)) { + rcu_read_unlock(); return; + } __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0, BLK_TA_DRV_DATA, 0, len, data, blk_trace_request_get_cgid(q, rq)); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(blk_add_driver_data); @@ -1594,6 +1639,7 @@ static int blk_trace_remove_queue(struct request_queue *q) return -EINVAL; put_probe_ref(); + synchronize_rcu(); blk_trace_free(bt); return 0; } @@ -1755,6 +1801,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, struct hd_struct *p = dev_to_part(dev); struct request_queue *q; struct block_device *bdev; + struct blk_trace *bt; ssize_t ret = -ENXIO; bdev = bdget(part_devt(p)); @@ -1767,21 +1814,23 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, mutex_lock(&q->blk_trace_mutex); + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); if (attr == &dev_attr_enable) { - ret = sprintf(buf, "%u\n", !!q->blk_trace); + ret = sprintf(buf, "%u\n", !!bt); goto out_unlock_bdev; } - if (q->blk_trace == NULL) + if (bt == NULL) ret = sprintf(buf, "disabled\n"); else if (attr == &dev_attr_act_mask) - ret = blk_trace_mask2str(buf, q->blk_trace->act_mask); + ret = blk_trace_mask2str(buf, bt->act_mask); else if (attr == &dev_attr_pid) - ret = sprintf(buf, "%u\n", q->blk_trace->pid); + ret = sprintf(buf, "%u\n", bt->pid); else if (attr == &dev_attr_start_lba) - ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba); + ret = sprintf(buf, "%llu\n", bt->start_lba); else if (attr == &dev_attr_end_lba) - ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba); + ret = sprintf(buf, "%llu\n", bt->end_lba); out_unlock_bdev: mutex_unlock(&q->blk_trace_mutex); @@ -1798,6 +1847,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, struct block_device *bdev; struct request_queue *q; struct hd_struct *p; + struct blk_trace *bt; u64 value; ssize_t ret = -EINVAL; @@ -1828,8 +1878,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, mutex_lock(&q->blk_trace_mutex); + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); if (attr == &dev_attr_enable) { - if (!!value == 
!!q->blk_trace) { + if (!!value == !!bt) { ret = 0; goto out_unlock_bdev; } @@ -1841,18 +1893,21 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, } ret = 0; - if (q->blk_trace == NULL) + if (bt == NULL) { ret = blk_trace_setup_queue(q, bdev); + bt = rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex)); + } if (ret == 0) { if (attr == &dev_attr_act_mask) - q->blk_trace->act_mask = value; + bt->act_mask = value; else if (attr == &dev_attr_pid) - q->blk_trace->pid = value; + bt->pid = value; else if (attr == &dev_attr_start_lba) - q->blk_trace->start_lba = value; + bt->start_lba = value; else if (attr == &dev_attr_end_lba) - q->blk_trace->end_lba = value; + bt->end_lba = value; } out_unlock_bdev: diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c index 21bb161c231698aff56c96d5d5908da1ef3af97d..df060af7fd4a942781a4e9d7a3b52391523fdb7a 100644 --- a/kernel/trace/power-traces.c +++ b/kernel/trace/power-traces.c @@ -19,3 +19,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle); EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency); EXPORT_TRACEPOINT_SYMBOL_GPL(powernv_throttle); +EXPORT_TRACEPOINT_SYMBOL_GPL(clock_set_rate); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c41f7d1ab5fa2ae24c25852ae16251cb2f632568..4966410bb0f4d8a3f615d013d2e01bb076b1573d 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -7750,6 +7750,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size) */ allocate_snapshot = false; #endif + + /* + * Because of some magic with the way alloc_percpu() works on + * x86_64, we need to synchronize the pgd of all the tables, + * otherwise the trace events that happen in x86_64 page fault + * handlers can't cope with accessing the chance that a + * alloc_percpu()'d memory might be touched in the page fault trace + * event. Oh, and we need to audit all other alloc_percpu() and vmalloc() + * calls in tracing, because something might get triggered within a + * page fault trace event! + */ + vmalloc_sync_mappings(); + return 0; } diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 9300e8bbf08ac80a70ec4103101d9c837c19ba69..38a2a558e546b548c3c52af2b34f480a5cd1f3ff 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -1081,14 +1081,10 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops, struct event_trigger_data *data, struct trace_event_file *file) { - int ret = register_trigger(glob, ops, data, file); - - if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) { - unregister_trigger(glob, ops, data, file); - ret = 0; - } + if (tracing_alloc_snapshot_instance(file->tr) != 0) + return 0; - return ret; + return register_trigger(glob, ops, data, file); } static int diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index c61b2b0a99e9cc2ec6828a072c0c30b8df78d5d1..c45b017bacd47749370340df22f51078d6fd6a3f 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -538,7 +538,7 @@ static bool __within_notrace_func(unsigned long addr) static bool within_notrace_func(struct trace_kprobe *tk) { - unsigned long addr = addr = trace_kprobe_address(tk); + unsigned long addr = trace_kprobe_address(tk); char symname[KSYM_NAME_LEN], *p; if (!__within_notrace_func(addr)) @@ -975,6 +975,8 @@ static int probes_seq_show(struct seq_file *m, void *v) int i; seq_putc(m, trace_kprobe_is_return(tk) ? 
'r' : 'p'); + if (trace_kprobe_is_return(tk) && tk->rp.maxactive) + seq_printf(m, "%d", tk->rp.maxactive); seq_printf(m, ":%s/%s", tk->tp.call.class->system, trace_event_name(&tk->tp.call)); diff --git a/kernel/umh.c b/kernel/umh.c index c449858946afc23706e06ab1e893ba0cef4c4f99..52a9084f85419b8127aee4b1ef2c6f380b3fa870 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -522,6 +522,11 @@ EXPORT_SYMBOL_GPL(fork_usermode_blob); * Runs a user-space application. The application is started * asynchronously if wait is not set, and runs as a child of system workqueues. * (ie. it runs with full root capabilities and optimized affinity). + * + * Note: successful return value does not guarantee the helper was called at + * all. You can't rely on sub_info->{init,cleanup} being called even for + * UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers + * into a successful no-op. */ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) { diff --git a/lib/Kconfig b/lib/Kconfig index a3928d4438b5008c1fa01470de11245d1557bc33..10a40311b846a1dedf5e742c22c0e194a2b03e29 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -554,6 +554,11 @@ config OID_REGISTRY config UCS2_STRING tristate +# +# generic vdso +# +source "lib/vdso/Kconfig" + source "lib/fonts/Kconfig" config SG_SPLIT diff --git a/lib/devres.c b/lib/devres.c index aa0f5308ac6be8f9bcc3826132c856cd8fa8db06..75ea32d9b661bdc3f7fbeb64044601213bc82c7b 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -9,6 +9,7 @@ enum devm_ioremap_type { DEVM_IOREMAP = 0, DEVM_IOREMAP_NC, + DEVM_IOREMAP_UC, DEVM_IOREMAP_WC, }; @@ -39,6 +40,9 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset, case DEVM_IOREMAP_NC: addr = ioremap_nocache(offset, size); break; + case DEVM_IOREMAP_UC: + addr = ioremap_uc(offset, size); + break; case DEVM_IOREMAP_WC: addr = ioremap_wc(offset, size); break; @@ -68,6 +72,21 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, } EXPORT_SYMBOL(devm_ioremap); +/** + * devm_ioremap_uc - Managed ioremap_uc() + * @dev: Generic device to remap IO address for + * @offset: Resource address to map + * @size: Size of map + * + * Managed ioremap_uc(). Map is automatically unmapped on driver detach. + */ +void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset, + resource_size_t size) +{ + return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC); +} +EXPORT_SYMBOL_GPL(devm_ioremap_uc); + /** * devm_ioremap_nocache - Managed ioremap_nocache() * @dev: Generic device to remap IO address for diff --git a/lib/find_bit.c b/lib/find_bit.c index ee3df93ba69af9f5c17d811d06ac0b9a3de8c884..8a5492173267d64675c0a62edd6822a4e7def412 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c @@ -153,18 +153,6 @@ EXPORT_SYMBOL(find_last_bit); #ifdef __BIG_ENDIAN -/* include/linux/byteorder does not support "unsigned long" type */ -static inline unsigned long ext2_swab(const unsigned long y) -{ -#if BITS_PER_LONG == 64 - return (unsigned long) __swab64((u64) y); -#elif BITS_PER_LONG == 32 - return (unsigned long) __swab32((u32) y); -#else -#error BITS_PER_LONG not defined -#endif -} - #if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le) static inline unsigned long _find_next_bit_le(const unsigned long *addr1, const unsigned long *addr2, unsigned long nbits, @@ -181,7 +169,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1, tmp ^= invert; /* Handle 1st word. 
*/ - tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start)); + tmp &= swab(BITMAP_FIRST_WORD_MASK(start)); start = round_down(start, BITS_PER_LONG); while (!tmp) { @@ -195,7 +183,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1, tmp ^= invert; } - return min(start + __ffs(ext2_swab(tmp)), nbits); + return min(start + __ffs(swab(tmp)), nbits); } #endif diff --git a/lib/ioremap.c b/lib/ioremap.c index 517f5853ffed1726a462543f902450ddb7f9a9b4..61ed878bd6f8eebb9e8c2d77f8413de8102d81b1 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c @@ -181,3 +181,4 @@ int ioremap_page_range(unsigned long addr, return err; } +EXPORT_SYMBOL_GPL(ioremap_page_range); diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index 08c60d10747fddc204788e75f24107805702caf4..e01b705556aa68fb156d4e481c07c9677dde83d8 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -756,22 +756,22 @@ do { \ do { \ if (__builtin_constant_p(bh) && (bh) == 0) \ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "%r" ((USItype)(al)), \ "rI" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "%r" ((USItype)(al)), \ "rI" ((USItype)(bl))); \ else \ __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "r" ((USItype)(bh)), \ "%r" ((USItype)(al)), \ @@ -781,36 +781,36 @@ do { \ do { \ if (__builtin_constant_p(ah) && (ah) == 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(ah)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(ah)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else \ __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "r" ((USItype)(ah)), \ "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ @@ -821,7 +821,7 @@ do { \ do { \ USItype __m0 = (m0), __m1 = (m1); \ __asm__ ("mulhwu %0,%1,%2" \ - : "=r" ((USItype) ph) \ + : "=r" (ph) \ : "%r" (__m0), \ "r" (__m1)); \ (pl) = __m0 * __m1; \ diff --git a/lib/plist.c b/lib/plist.c index 30162b4be1c10f67a2271849001e6110944afa6b..b2660a48a57202be77115c6e1e881a67feed1a13 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -133,6 +133,7 @@ void plist_del(struct plist_node *node, struct plist_head *head) plist_check_head(head); } +EXPORT_SYMBOL_GPL(plist_del); /** * 
plist_requeue - Requeue @node at end of same-prio entries. diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc index d5242f5445511784bc821f3cd64a4487f2673c97..b7c68030da4fd94d5e6bf3efca157127288e9f9a 100644 --- a/lib/raid6/neon.uc +++ b/lib/raid6/neon.uc @@ -28,7 +28,6 @@ typedef uint8x16_t unative_t; -#define NBYTES(x) ((unative_t){x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x}) #define NSIZE sizeof(unative_t) /* @@ -61,7 +60,7 @@ void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs) int d, z, z0; register unative_t wd$$, wq$$, wp$$, w1$$, w2$$; - const unative_t x1d = NBYTES(0x1d); + const unative_t x1d = vdupq_n_u8(0x1d); z0 = disks - 3; /* Highest data disk */ p = dptr[z0+1]; /* XOR parity */ @@ -92,7 +91,7 @@ void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop, int d, z, z0; register unative_t wd$$, wq$$, wp$$, w1$$, w2$$; - const unative_t x1d = NBYTES(0x1d); + const unative_t x1d = vdupq_n_u8(0x1d); z0 = stop; /* P/Q right side optimization */ p = dptr[disks-2]; /* XOR parity */ diff --git a/lib/raid6/recov_neon_inner.c b/lib/raid6/recov_neon_inner.c index 8cd20c9f834a1e8cc90e9a4f783f74a4244a5f7a..7d00c31a654706763eb29da6952bb7033e635eee 100644 --- a/lib/raid6/recov_neon_inner.c +++ b/lib/raid6/recov_neon_inner.c @@ -10,11 +10,6 @@ #include -static const uint8x16_t x0f = { - 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, - 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, -}; - #ifdef CONFIG_ARM /* * AArch32 does not provide this intrinsic natively because it does not @@ -41,6 +36,7 @@ void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp, uint8x16_t pm1 = vld1q_u8(pbmul + 16); uint8x16_t qm0 = vld1q_u8(qmul); uint8x16_t qm1 = vld1q_u8(qmul + 16); + uint8x16_t x0f = vdupq_n_u8(0x0f); /* * while ( bytes-- ) { @@ -87,6 +83,7 @@ void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq, { uint8x16_t qm0 = vld1q_u8(qmul); uint8x16_t qm1 = vld1q_u8(qmul + 16); + uint8x16_t x0f = vdupq_n_u8(0x0f); /* * while (bytes--) { diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..9fe698ff62ec4ffcaaf6c9e838c358bc31a57bca --- /dev/null +++ b/lib/vdso/Kconfig @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0 + +config HAVE_GENERIC_VDSO + bool + +if HAVE_GENERIC_VDSO + +config GENERIC_GETTIMEOFDAY + bool + help + This is a generic implementation of gettimeofday vdso. + Each architecture that enables this feature has to + provide the fallback implementation. + +config GENERIC_VDSO_32 + bool + depends on GENERIC_GETTIMEOFDAY && !64BIT + help + This config option helps to avoid possible performance issues + in 32 bit only architectures. + +config GENERIC_COMPAT_VDSO + bool + help + This config option enables the compat VDSO layer. + +endif diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..c415a685d61bb16ae92d196fb035ca7e3f397c6d --- /dev/null +++ b/lib/vdso/Makefile @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 + +GENERIC_VDSO_MK_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) +GENERIC_VDSO_DIR := $(dir $(GENERIC_VDSO_MK_PATH)) + +c-gettimeofday-$(CONFIG_GENERIC_GETTIMEOFDAY) := $(addprefix $(GENERIC_VDSO_DIR), gettimeofday.c) + +# This cmd checks that the vdso library does not contain absolute relocation +# It has to be called after the linking of the vdso library and requires it +# as a parameter. 
+# +# $(ARCH_REL_TYPE_ABS) is defined in the arch specific makefile and corresponds +# to the absolute relocation types printed by "objdump -R" and accepted by the +# dynamic linker. +ifndef ARCH_REL_TYPE_ABS +$(error ARCH_REL_TYPE_ABS is not set) +endif + +quiet_cmd_vdso_check = VDSOCHK $@ + cmd_vdso_check = if $(OBJDUMP) -R $@ | egrep -h "$(ARCH_REL_TYPE_ABS)"; \ + then (echo >&2 "$@: dynamic relocations are not supported"; \ + rm -f $@; /bin/false); fi diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c new file mode 100644 index 0000000000000000000000000000000000000000..4f82b7abe0760d4065813579adbbebea563118f0 --- /dev/null +++ b/lib/vdso/gettimeofday.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generic userspace implementations of gettimeofday() and similar. + */ +#include +#include + +#ifndef vdso_calc_delta +/* + * Default implementation which works for all sane clocksources. That + * obviously excludes x86/TSC. + */ +static __always_inline +u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult) +{ + return ((cycles - last) & mask) * mult; +} +#endif + +#ifndef __arch_vdso_hres_capable +static inline bool __arch_vdso_hres_capable(void) +{ + return true; +} +#endif + +static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk, + struct __kernel_timespec *ts) +{ + const struct vdso_timestamp *vdso_ts = &vd->basetime[clk]; + u64 cycles, last, sec, ns; + u32 seq; + + /* Allows to compile the high resolution parts out */ + if (!__arch_vdso_hres_capable()) + return -1; + + do { + seq = vdso_read_begin(vd); + cycles = __arch_get_hw_counter(vd->clock_mode); + ns = vdso_ts->nsec; + last = vd->cycle_last; + if (unlikely((s64)cycles < 0)) + return -1; + + ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult); + ns >>= vd->shift; + sec = vdso_ts->sec; + } while (unlikely(vdso_read_retry(vd, seq))); + + /* + * Do this outside the loop: a race inside the loop could result + * in __iter_div_u64_rem() being extremely slow. + */ + ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); + ts->tv_nsec = ns; + + return 0; +} + +static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk, + struct __kernel_timespec *ts) +{ + const struct vdso_timestamp *vdso_ts = &vd->basetime[clk]; + u32 seq; + + do { + seq = vdso_read_begin(vd); + ts->tv_sec = vdso_ts->sec; + ts->tv_nsec = vdso_ts->nsec; + } while (unlikely(vdso_read_retry(vd, seq))); + + return 0; +} + +static __maybe_unused int +__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts) +{ + const struct vdso_data *vd = __arch_get_vdso_data(); + u32 msk; + + /* Check for negative values or invalid clocks */ + if (unlikely((u32) clock >= MAX_CLOCKS)) + return -1; + + /* + * Convert the clockid to a bitmask and use it to check which + * clocks are handled in the VDSO directly. 
+ */ + msk = 1U << clock; + if (likely(msk & VDSO_HRES)) + vd = &vd[CS_HRES_COARSE]; + else if (msk & VDSO_COARSE) + return do_coarse(&vd[CS_HRES_COARSE], clock, ts); + else if (msk & VDSO_RAW) + vd = &vd[CS_RAW]; + else + return -1; + + return do_hres(vd, clock, ts); +} + +static __maybe_unused int +__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) +{ + int ret = __cvdso_clock_gettime_common(clock, ts); + + if (unlikely(ret)) + return clock_gettime_fallback(clock, ts); + return 0; +} + +#ifdef BUILD_VDSO32 +static __maybe_unused int +__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res) +{ + struct __kernel_timespec ts; + int ret; + + ret = __cvdso_clock_gettime_common(clock, &ts); + +#ifdef VDSO_HAS_32BIT_FALLBACK + if (unlikely(ret)) + return clock_gettime32_fallback(clock, res); +#else + if (unlikely(ret)) + ret = clock_gettime_fallback(clock, &ts); +#endif + + /* For ret == 0 */ + res->tv_sec = ts.tv_sec; + res->tv_nsec = ts.tv_nsec; + + return ret; +} +#endif /* BUILD_VDSO32 */ + +static __maybe_unused int +__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) +{ + const struct vdso_data *vd = __arch_get_vdso_data(); + + if (likely(tv != NULL)) { + struct __kernel_timespec ts; + + if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts)) + return gettimeofday_fallback(tv, tz); + + tv->tv_sec = ts.tv_sec; + tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC; + } + + if (unlikely(tz != NULL)) { + tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest; + tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime; + } + + return 0; +} + +#ifdef VDSO_HAS_TIME +static __maybe_unused time_t __cvdso_time(time_t *time) +{ + const struct vdso_data *vd = __arch_get_vdso_data(); + time_t t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec); + + if (time) + *time = t; + + return t; +} +#endif /* VDSO_HAS_TIME */ + +#ifdef VDSO_HAS_CLOCK_GETRES +static __maybe_unused +int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res) +{ + const struct vdso_data *vd = __arch_get_vdso_data(); + u32 msk; + u64 ns; + + /* Check for negative values or invalid clocks */ + if (unlikely((u32) clock >= MAX_CLOCKS)) + return -1; + + /* + * Convert the clockid to a bitmask and use it to check which + * clocks are handled in the VDSO directly. + */ + msk = 1U << clock; + if (msk & (VDSO_HRES | VDSO_RAW)) { + /* + * Preserves the behaviour of posix_get_hrtimer_res(). + */ + ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res); + } else if (msk & VDSO_COARSE) { + /* + * Preserves the behaviour of posix_get_coarse_res(). 
+ */ + ns = LOW_RES_NSEC; + } else { + return -1; + } + + if (likely(res)) { + res->tv_sec = 0; + res->tv_nsec = ns; + } + return 0; +} + +static __maybe_unused +int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res) +{ + int ret = __cvdso_clock_getres_common(clock, res); + + if (unlikely(ret)) + return clock_getres_fallback(clock, res); + return 0; +} + +#ifdef BUILD_VDSO32 +static __maybe_unused int +__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res) +{ + struct __kernel_timespec ts; + int ret; + + ret = __cvdso_clock_getres_common(clock, &ts); + +#ifdef VDSO_HAS_32BIT_FALLBACK + if (unlikely(ret)) + return clock_getres32_fallback(clock, res); +#else + if (unlikely(ret)) + ret = clock_getres_fallback(clock, &ts); +#endif + + if (likely(res)) { + res->tv_sec = ts.tv_sec; + res->tv_nsec = ts.tv_nsec; + } + return ret; +} +#endif /* BUILD_VDSO32 */ +#endif /* VDSO_HAS_CLOCK_GETRES */ diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 630d42636dcf5e8c619972340360e477f717ab54..c826ebaa6563759e3650467160223b1c7084559a 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -1733,6 +1733,7 @@ int ptr_to_hashval(const void *ptr, unsigned long *hashval_out) { return __ptr_to_hashval(ptr, hashval_out); } +EXPORT_SYMBOL_GPL(ptr_to_hashval); /* Maps a pointer to a 32 bit unique identifier. */ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec) diff --git a/mm/Kconfig b/mm/Kconfig index b457e94ae6182a491df70157a607af812b6f1b37..036466b34e18cbc5990559c032f62698d24681c4 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -614,6 +614,22 @@ config ZSMALLOC_STAT information to userspace via debugfs. If unsure, say N. +config MM_EVENT_STAT + bool "Track per-process MM event" + depends on MMU + help + This option enables per-process mm event stat(e.g., fault, reclaim, + compaction and so on ) with some interval(Default is 0.5sec). + Admin can see the stat from trace file via debugfs(e.g., + /sys/kernel/debug/tracing/trace) + + It includes max/average memory allocation latency for the interval + as well as event count so that admin can see what happens in VM side + (how many each event happens and how much processes spent time for + the MM event). If it's too large, that would be not good situation. + + System can dump the trace into bugreport when user allows the dump. + config GENERIC_EARLY_IOREMAP bool diff --git a/mm/Makefile b/mm/Makefile index 26ef77a3883b5c708659425229e975ac74069875..60727566b47e9597225b46063c092891630ded67 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -28,6 +28,9 @@ mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \ rmap.o vmalloc.o +ifdef CONFIG_MM_EVENT_STAT +mmu-$(CONFIG_MMU) += mm_event.o +endif ifdef CONFIG_CROSS_MEMORY_ATTACH mmu-$(CONFIG_MMU) += process_vm_access.o endif diff --git a/mm/compaction.c b/mm/compaction.c index b8ced23f6c5c71af0d17b5e7c910b5e0d6fd39da..120e5559e6b4494eeecfc996643e93f90a992f4e 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -237,70 +237,6 @@ static bool pageblock_skip_persistent(struct page *page) return false; } -static bool -__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, - bool check_target) -{ - struct page *page = pfn_to_online_page(pfn); - struct page *end_page; - unsigned long block_pfn; - - if (!page) - return false; - if (zone != page_zone(page)) - return false; - if (pageblock_skip_persistent(page)) - return false; - - /* - * If skip is already cleared do no further checking once the - * restart points have been set. 
- */ - if (check_source && check_target && !get_pageblock_skip(page)) - return true; - - /* - * If clearing skip for the target scanner, do not select a - * non-movable pageblock as the starting point. - */ - if (!check_source && check_target && - get_pageblock_migratetype(page) != MIGRATE_MOVABLE) - return false; - - /* - * Only clear the hint if a sample indicates there is either a - * free page or an LRU page in the block. One or other condition - * is necessary for the block to be a migration source/target. - */ - block_pfn = pageblock_start_pfn(pfn); - pfn = max(block_pfn, zone->zone_start_pfn); - page = pfn_to_page(pfn); - if (zone != page_zone(page)) - return false; - pfn = block_pfn + pageblock_nr_pages; - pfn = min(pfn, zone_end_pfn(zone)); - end_page = pfn_to_page(pfn); - - do { - if (pfn_valid_within(pfn)) { - if (check_source && PageLRU(page)) { - clear_pageblock_skip(page); - return true; - } - - if (check_target && PageBuddy(page)) { - clear_pageblock_skip(page); - return true; - } - } - - page += (1 << PAGE_ALLOC_COSTLY_ORDER); - pfn += (1 << PAGE_ALLOC_COSTLY_ORDER); - } while (page < end_page); - - return false; -} - /* * This function is called to clear all cached information on pageblocks that * should be skipped for page isolation when the migrate and free page scanner @@ -308,54 +244,30 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, */ static void __reset_isolation_suitable(struct zone *zone) { - unsigned long migrate_pfn = zone->zone_start_pfn; - unsigned long free_pfn = zone_end_pfn(zone); - unsigned long reset_migrate = free_pfn; - unsigned long reset_free = migrate_pfn; - bool source_set = false; - bool free_set = false; - - if (!zone->compact_blockskip_flush) - return; + unsigned long start_pfn = zone->zone_start_pfn; + unsigned long end_pfn = zone_end_pfn(zone); + unsigned long pfn; zone->compact_blockskip_flush = false; - /* - * Walk the zone and update pageblock skip information. Source looks - * for PageLRU while target looks for PageBuddy. When the scanner - * is found, both PageBuddy and PageLRU are checked as the pageblock - * is suitable as both source and target. 
- */ - for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages, - free_pfn -= pageblock_nr_pages) { + /* Walk the zone and mark every pageblock as suitable for isolation */ + for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { + struct page *page; + cond_resched(); - /* Update the migrate PFN */ - if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) && - migrate_pfn < reset_migrate) { - source_set = true; - reset_migrate = migrate_pfn; - zone->compact_init_migrate_pfn = reset_migrate; - zone->compact_cached_migrate_pfn[0] = reset_migrate; - zone->compact_cached_migrate_pfn[1] = reset_migrate; - } + page = pfn_to_online_page(pfn); + if (!page) + continue; + if (zone != page_zone(page)) + continue; + if (pageblock_skip_persistent(page)) + continue; - /* Update the free PFN */ - if (__reset_isolation_pfn(zone, free_pfn, free_set, true) && - free_pfn > reset_free) { - free_set = true; - reset_free = free_pfn; - zone->compact_init_free_pfn = reset_free; - zone->compact_cached_free_pfn = reset_free; - } + clear_pageblock_skip(page); } - /* Leave no distance if no suitable block was reset */ - if (reset_migrate >= reset_free) { - zone->compact_cached_migrate_pfn[0] = migrate_pfn; - zone->compact_cached_migrate_pfn[1] = migrate_pfn; - zone->compact_cached_free_pfn = free_pfn; - } + reset_cached_positions(zone); } void reset_isolation_suitable(pg_data_t *pgdat) @@ -1519,7 +1431,7 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order, if (is_via_compact_memory(order)) return COMPACT_CONTINUE; - watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); + watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; /* * If watermarks for high-order allocation are already met, there * should be no need for compaction at all. 
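The two mm/compaction.c hunks above restore direct reads of the per-zone watermark array, e.g. zone->watermark[alloc_flags & ALLOC_WMARK_MASK] in __compaction_suitable(), in place of the wmark_pages() accessor and watermark boosting. As a standalone illustration of that indexing idiom (an editorial sketch, not part of the patch: the WMARK_*/ALLOC_* definitions and the numeric watermarks below are simplified stand-ins, not the kernel's real declarations):

/*
 * Sketch: the low bits of alloc_flags select which per-zone watermark
 * to compare against. Simplified stand-ins for the kernel definitions.
 */
#include <stdio.h>

enum zone_watermarks { WMARK_MIN, WMARK_LOW, WMARK_HIGH, NR_WMARK };

#define ALLOC_WMARK_MIN   WMARK_MIN    /* 0x0 */
#define ALLOC_WMARK_LOW   WMARK_LOW    /* 0x1 */
#define ALLOC_WMARK_HIGH  WMARK_HIGH   /* 0x2 */
#define ALLOC_WMARK_MASK  0x3          /* low bits pick the watermark index */

struct zone {
	unsigned long watermark[NR_WMARK];
};

static unsigned long wmark_for(const struct zone *zone, unsigned int alloc_flags)
{
	/* Same indexing idiom the hunk above restores in __compaction_suitable() */
	return zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
}

int main(void)
{
	/* Hypothetical watermark values, in pages */
	struct zone z = { .watermark = { 128, 160, 192 } };
	/* Higher ALLOC_* bits (cpuset, CMA, ...) do not affect the index */
	unsigned int alloc_flags = ALLOC_WMARK_LOW | 0x40;

	printf("selected watermark: %lu pages\n", wmark_for(&z, alloc_flags));
	return 0;
}

The same zone->watermark[alloc_flags & ALLOC_WMARK_MASK] pattern reappears later in this diff in get_page_from_freelist() and __setup_per_zone_wmarks().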
@@ -1679,7 +1591,7 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; } - if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) + if (cc->migrate_pfn == start_pfn) cc->whole_zone = true; } diff --git a/mm/filemap.c b/mm/filemap.c index aea71dc89dcd9acb53e0347357ffa200a44f0054..82d1a339a551fea7f0d9c1146cf33fbec94c9d20 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1095,7 +1095,7 @@ static void wake_up_page(struct page *page, int bit) wake_up_page_bit(page, bit); } -static inline int wait_on_page_bit_common(wait_queue_head_t *q, +static inline __sched int wait_on_page_bit_common(wait_queue_head_t *q, struct page *page, int bit_nr, int state, bool lock) { struct wait_page_queue wait_page; @@ -1167,14 +1167,14 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, return ret; } -void wait_on_page_bit(struct page *page, int bit_nr) +void __sched wait_on_page_bit(struct page *page, int bit_nr) { wait_queue_head_t *q = page_waitqueue(page); wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false); } EXPORT_SYMBOL(wait_on_page_bit); -int wait_on_page_bit_killable(struct page *page, int bit_nr) +int __sched wait_on_page_bit_killable(struct page *page, int bit_nr) { wait_queue_head_t *q = page_waitqueue(page); return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false); @@ -1306,7 +1306,7 @@ EXPORT_SYMBOL_GPL(page_endio); * __lock_page - get a lock on the page, assuming we need to sleep to get it * @__page: the page to lock */ -void __lock_page(struct page *__page) +void __sched __lock_page(struct page *__page) { struct page *page = compound_head(__page); wait_queue_head_t *q = page_waitqueue(page); @@ -1314,7 +1314,7 @@ void __lock_page(struct page *__page) } EXPORT_SYMBOL(__lock_page); -int __lock_page_killable(struct page *__page) +int __sched __lock_page_killable(struct page *__page) { struct page *page = compound_head(__page); wait_queue_head_t *q = page_waitqueue(page); @@ -1333,7 +1333,7 @@ EXPORT_SYMBOL_GPL(__lock_page_killable); * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1 * with the page locked and the mmap_sem unperturbed. */ -int __lock_page_or_retry(struct page *page, struct mm_struct *mm, +int __sched __lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags) { if (flags & FAULT_FLAG_ALLOW_RETRY) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 6f4ce9547658dd7cb1a136304e4f07e9da4280b0..e068c7f75a84990f5013ee5de070f53cf2888b06 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4820,8 +4820,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm, { pgd_t *pgd; p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; + pud_t *pud, pud_entry; + pmd_t *pmd, pmd_entry; pgd = pgd_offset(mm, addr); if (!pgd_present(*pgd)) @@ -4831,17 +4831,19 @@ pte_t *huge_pte_offset(struct mm_struct *mm, return NULL; pud = pud_offset(p4d, addr); - if (sz != PUD_SIZE && pud_none(*pud)) + pud_entry = READ_ONCE(*pud); + if (sz != PUD_SIZE && pud_none(pud_entry)) return NULL; /* hugepage or swap? */ - if (pud_huge(*pud) || !pud_present(*pud)) + if (pud_huge(pud_entry) || !pud_present(pud_entry)) return (pte_t *)pud; pmd = pmd_offset(pud, addr); - if (sz != PMD_SIZE && pmd_none(*pmd)) + pmd_entry = READ_ONCE(*pmd); + if (sz != PMD_SIZE && pmd_none(pmd_entry)) return NULL; /* hugepage or swap? 
*/ - if (pmd_huge(*pmd) || !pmd_present(*pmd)) + if (pmd_huge(pmd_entry) || !pmd_present(pmd_entry)) return (pte_t *)pmd; return NULL; diff --git a/mm/internal.h b/mm/internal.h index 5b9734aff4a5f23a0c54276f09abfe67133cfb6f..397183c8fe47be1e6ad4d5c7b898606dd40cfabb 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -490,16 +490,10 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, #define ALLOC_OOM ALLOC_NO_WATERMARKS #endif -#define ALLOC_HARDER 0x10 /* try to alloc harder */ -#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ -#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ -#define ALLOC_CMA 0x80 /* allow allocations from CMA areas */ -#ifdef CONFIG_ZONE_DMA32 -#define ALLOC_NOFRAGMENT 0x100 /* avoid mixing pageblock types */ -#else -#define ALLOC_NOFRAGMENT 0x0 -#endif -#define ALLOC_KSWAPD 0x200 /* allow waking of kswapd */ +#define ALLOC_HARDER 0x10 /* try to alloc harder */ +#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ +#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ +#define ALLOC_CMA 0x80 /* allow allocations from CMA areas */ enum ttu_flags; struct tlbflush_unmap_batch; diff --git a/mm/ksm.c b/mm/ksm.c index b3ea0f0316eb670acbecc5be52c51f7711327369..d021bcf94c41d4fc9e3c709f4373aa25a795c3d7 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2106,8 +2106,16 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) down_read(&mm->mmap_sem); vma = find_mergeable_vma(mm, rmap_item->address); - err = try_to_merge_one_page(vma, page, - ZERO_PAGE(rmap_item->address)); + if (vma) { + err = try_to_merge_one_page(vma, page, + ZERO_PAGE(rmap_item->address)); + } else { + /* + * If the vma is out of date, we do not need to + * continue. + */ + err = 0; + } up_read(&mm->mmap_sem); /* * In case of failure, the page was not really empty, so we diff --git a/mm/memblock.c b/mm/memblock.c index 6e317a527d3c3141979290917834559f1e3e2cb8..246c81672a97b58fb9c323b414b98a7a958ca53d 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -803,6 +803,7 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) kmemleak_free_part_phys(base, size); return memblock_remove_range(&memblock.reserved, base, size); } +EXPORT_SYMBOL_GPL(memblock_free); int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) { diff --git a/mm/memory.c b/mm/memory.c index d219df50389fdd8edcef6c27da1a9841f694a2d4..5dee46b1782f1b555828a0dd2437b4f81a00052f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -156,6 +156,7 @@ void mm_trace_rss_stat(struct mm_struct *mm, int member, long count, if ((count & thresh_mask) != ((count - value) & thresh_mask)) trace_rss_stat(mm, member, count); } +EXPORT_SYMBOL_GPL(mm_trace_rss_stat); #if defined(SPLIT_RSS_COUNTING) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 61da29cab467c3c268eeb1128f49aa5291039d57..69ee47b614e3b5df698bb9146da1e9f9e2d44ac2 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2836,7 +2836,9 @@ int mpol_parse_str(char *str, struct mempolicy **mpol) switch (mode) { case MPOL_PREFERRED: /* - * Insist on a nodelist of one node only + * Insist on a nodelist of one node only, although later + * we use first_node(nodes) to grab a single node, so here + * nodelist (or nodes) cannot be empty. 
*/ if (nodelist) { char *rest = nodelist; @@ -2844,6 +2846,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol) rest++; if (*rest) goto out; + if (nodes_empty(nodes)) + goto out; } break; case MPOL_INTERLEAVE: diff --git a/mm/mm_event.c b/mm/mm_event.c new file mode 100644 index 0000000000000000000000000000000000000000..02e1daba6582fd02a8ef364b90be88ab29631fb9 --- /dev/null +++ b/mm/mm_event.c @@ -0,0 +1,7 @@ +#include +#include + +void mm_event_count(enum mm_event_type event, int count) +{ +} +EXPORT_SYMBOL_GPL(mm_event_count); diff --git a/mm/mmap.c b/mm/mmap.c index 5cc77af2c0f8e01ff1c2d0cbfcad713d29d0bfd0..a11ab08258b3607628a76eebd3a1f7b18dc4168a 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2049,6 +2049,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) VM_BUG_ON(gap_end < gap_start); return gap_end; } +EXPORT_SYMBOL_GPL(unmapped_area_topdown); /* Get an address range which is currently unmapped. * For shmat() with addr=0. diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 56f82c3052c726d6cdb01883f45db11ee70ab469..0a3876bbd5edfbd21467b1aacd54bd1aed8f795d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -318,7 +318,6 @@ compound_page_dtor * const compound_page_dtors[] = { */ int min_free_kbytes = 1024; int user_min_free_kbytes = -1; -int watermark_boost_factor __read_mostly = 15000; int watermark_scale_factor = 10; /* @@ -1497,6 +1496,7 @@ void set_zone_contiguous(struct zone *zone) if (!__pageblock_pfn_to_page(block_start_pfn, block_end_pfn, zone)) return; + cond_resched(); } /* We confirm that there is no hole */ @@ -2219,21 +2219,6 @@ static bool can_steal_fallback(unsigned int order, int start_mt) return false; } -static inline void boost_watermark(struct zone *zone) -{ - unsigned long max_boost; - - if (!watermark_boost_factor) - return; - - max_boost = mult_frac(zone->_watermark[WMARK_HIGH], - watermark_boost_factor, 10000); - max_boost = max(pageblock_nr_pages, max_boost); - - zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, - max_boost); -} - /* * This function implements actual steal behaviour. If order is large enough, * we can steal whole pageblock. If not, we first move freepages in this @@ -2243,7 +2228,7 @@ static inline void boost_watermark(struct zone *zone) * itself, so pages freed in the future will be put on the correct free list. */ static void steal_suitable_fallback(struct zone *zone, struct page *page, - unsigned int alloc_flags, int start_type, bool whole_block) + int start_type, bool whole_block) { unsigned int current_order = page_order(page); struct free_area *area; @@ -2265,15 +2250,6 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, goto single_page; } - /* - * Boost watermarks to increase reclaim pressure to reduce the - * likelihood of future fallbacks. Wake kswapd now as the node - * may be balanced overall and kswapd will not wake naturally. - */ - boost_watermark(zone); - if (alloc_flags & ALLOC_KSWAPD) - wakeup_kswapd(zone, 0, 0, zone_idx(zone)); - /* We are not allowed to try stealing from the whole block */ if (!whole_block) goto single_page; @@ -2489,30 +2465,20 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, * condition simpler. 
*/ static __always_inline bool -__rmqueue_fallback(struct zone *zone, int order, int start_migratetype, - unsigned int alloc_flags) +__rmqueue_fallback(struct zone *zone, int order, int start_migratetype) { struct free_area *area; int current_order; - int min_order = order; struct page *page; int fallback_mt; bool can_steal; - /* - * Do not steal pages from freelists belonging to other pageblocks - * i.e. orders < pageblock_order. If there are no local zones free, - * the zonelists will be reiterated without ALLOC_NOFRAGMENT. - */ - if (alloc_flags & ALLOC_NOFRAGMENT) - min_order = pageblock_order; - /* * Find the largest available free page in the other list. This roughly * approximates finding the pageblock with the most free pages, which * would be too costly to do exactly. */ - for (current_order = MAX_ORDER - 1; current_order >= min_order; + for (current_order = MAX_ORDER - 1; current_order >= order; --current_order) { area = &(zone->free_area[current_order]); fallback_mt = find_suitable_fallback(area, current_order, @@ -2557,8 +2523,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, page = list_first_entry(&area->free_list[fallback_mt], struct page, lru); - steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, - can_steal); + steal_suitable_fallback(zone, page, start_migratetype, can_steal); trace_mm_page_alloc_extfrag(page, order, current_order, start_migratetype, fallback_mt); @@ -2572,16 +2537,14 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, * Call me with the zone->lock already held. */ static __always_inline struct page * -__rmqueue(struct zone *zone, unsigned int order, int migratetype, - unsigned int alloc_flags) +__rmqueue(struct zone *zone, unsigned int order, int migratetype) { struct page *page; retry: page = __rmqueue_smallest(zone, order, migratetype); - if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype, - alloc_flags)) + if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype)) goto retry; trace_mm_page_alloc_zone_locked(page, order, migratetype); @@ -2613,7 +2576,7 @@ static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order) */ static int rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, - int migratetype, unsigned int alloc_flags) + int migratetype) { int i, alloced = 0; @@ -2629,7 +2592,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, if (is_migrate_cma(migratetype)) page = __rmqueue_cma(zone, order); else - page = __rmqueue(zone, order, migratetype, alloc_flags); + page = __rmqueue(zone, order, migratetype); if (unlikely(page == NULL)) break; @@ -2672,14 +2635,14 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, */ static struct list_head *get_populated_pcp_list(struct zone *zone, unsigned int order, struct per_cpu_pages *pcp, - int migratetype, unsigned int alloc_flags) + int migratetype) { struct list_head *list = &pcp->lists[migratetype]; if (list_empty(list)) { pcp->count += rmqueue_bulk(zone, order, pcp->batch, list, - migratetype, alloc_flags); + migratetype); if (list_empty(list)) list = NULL; @@ -3108,7 +3071,6 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) /* Remove page from the per-cpu list, caller must protect the list */ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, - unsigned int alloc_flags, struct per_cpu_pages *pcp, gfp_t gfp_flags) { @@ -3120,7 +3082,7 @@ static struct page 
*__rmqueue_pcplist(struct zone *zone, int migratetype, if (migratetype == MIGRATE_MOVABLE && gfp_flags & __GFP_CMA) { list = get_populated_pcp_list(zone, 0, pcp, - get_cma_migrate_type(), alloc_flags); + get_cma_migrate_type()); } if (list == NULL) { @@ -3129,7 +3091,7 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, * free CMA pages. */ list = get_populated_pcp_list(zone, 0, pcp, - migratetype, alloc_flags); + migratetype); if (unlikely(list == NULL) || unlikely(list_empty(list))) return NULL; @@ -3146,8 +3108,7 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, /* Lock and remove page from the per-cpu list */ static struct page *rmqueue_pcplist(struct zone *preferred_zone, struct zone *zone, unsigned int order, - gfp_t gfp_flags, int migratetype, - unsigned int alloc_flags) + gfp_t gfp_flags, int migratetype) { struct per_cpu_pages *pcp; struct page *page; @@ -3155,7 +3116,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, local_irq_save(flags); pcp = &this_cpu_ptr(zone->pageset)->pcp; - page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, + page = __rmqueue_pcplist(zone, migratetype, pcp, gfp_flags); if (page) { __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); @@ -3179,7 +3140,7 @@ struct page *rmqueue(struct zone *preferred_zone, if (likely(order == 0)) { page = rmqueue_pcplist(preferred_zone, zone, order, - gfp_flags, migratetype, alloc_flags); + gfp_flags, migratetype); goto out; } @@ -3204,7 +3165,7 @@ struct page *rmqueue(struct zone *preferred_zone, page = __rmqueue_cma(zone, order); if (!page) - page = __rmqueue(zone, order, migratetype, alloc_flags); + page = __rmqueue(zone, order, migratetype); } while (page && check_new_pages(page, order)); spin_unlock(&zone->lock); @@ -3456,40 +3417,6 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) } #endif /* CONFIG_NUMA */ -/* - * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid - * fragmentation is subtle. If the preferred zone was HIGHMEM then - * premature use of a lower zone may cause lowmem pressure problems that - * are worse than fragmentation. If the next zone is ZONE_DMA then it is - * probably too small. It only makes sense to spread allocations to avoid - * fragmentation between the Normal and DMA32 zones. - */ -static inline unsigned int -alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) -{ - unsigned int alloc_flags = 0; - - if (gfp_mask & __GFP_KSWAPD_RECLAIM) - alloc_flags |= ALLOC_KSWAPD; - -#ifdef CONFIG_ZONE_DMA32 - if (zone_idx(zone) != ZONE_NORMAL) - goto out; - - /* - * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and - * the pointer is within zone->zone_pgdat->node_zones[]. Also assume - * on UMA that if Normal is populated then so is DMA32. - */ - BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); - if (nr_online_nodes > 1 && !populated_zone(--zone)) - goto out; - -out: -#endif /* CONFIG_ZONE_DMA32 */ - return alloc_flags; -} - /* * get_page_from_freelist goes through the zonelist trying to allocate * a page. @@ -3498,18 +3425,14 @@ static struct page * get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, const struct alloc_context *ac) { - struct zoneref *z; + struct zoneref *z = ac->preferred_zoneref; struct zone *zone; struct pglist_data *last_pgdat_dirty_limit = NULL; - bool no_fallback; -retry: /* * Scan zonelist, looking for a zone with enough free. * See also __cpuset_node_allowed() comment in kernel/cpuset.c. 
*/ - no_fallback = alloc_flags & ALLOC_NOFRAGMENT; - z = ac->preferred_zoneref; for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, ac->nodemask) { struct page *page; @@ -3548,23 +3471,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, } } - if (no_fallback && nr_online_nodes > 1 && - zone != ac->preferred_zoneref->zone) { - int local_nid; - - /* - * If moving to a remote node, retry but allow - * fragmenting fallbacks. Locality is more important - * than fragmentation avoidance. - */ - local_nid = zone_to_nid(ac->preferred_zoneref->zone); - if (zone_to_nid(zone) != local_nid) { - alloc_flags &= ~ALLOC_NOFRAGMENT; - goto retry; - } - } - - mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); + mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; if (!zone_watermark_fast(zone, order, mark, ac_classzone_idx(ac), alloc_flags)) { int ret; @@ -3631,15 +3538,6 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, } } - /* - * It's possible on a UMA machine to get through all zones that are - * fragmented. If avoiding fragmentation, reset and try again. - */ - if (no_fallback) { - alloc_flags &= ~ALLOC_NOFRAGMENT; - goto retry; - } - return NULL; } @@ -4141,9 +4039,6 @@ gfp_to_alloc_flags(gfp_t gfp_mask) } else if (unlikely(rt_task(current)) && !in_interrupt()) alloc_flags |= ALLOC_HARDER; - if (gfp_mask & __GFP_KSWAPD_RECLAIM) - alloc_flags |= ALLOC_KSWAPD; - #ifdef CONFIG_CMA if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) alloc_flags |= ALLOC_CMA; @@ -4375,7 +4270,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, if (!ac->preferred_zoneref->zone) goto nopage; - if (alloc_flags & ALLOC_KSWAPD) + if (gfp_mask & __GFP_KSWAPD_RECLAIM) wake_all_kswapds(order, gfp_mask, ac); /* @@ -4433,7 +4328,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, retry: /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ - if (alloc_flags & ALLOC_KSWAPD) + if (gfp_mask & __GFP_KSWAPD_RECLAIM) wake_all_kswapds(order, gfp_mask, ac); reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); @@ -4652,12 +4547,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, finalise_ac(gfp_mask, &ac); - /* - * Forbid the first pass from falling back to types that fragment - * memory until all local zones are considered. - */ - alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask); - /* First allocation attempt */ page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); if (likely(page)) @@ -4802,11 +4691,11 @@ void *page_frag_alloc(struct page_frag_cache *nc, /* Even if we own the page, we do not use atomic_set(). * This would break get_page_unless_zero() users. 
*/ - page_ref_add(page, size); + page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); /* reset page count bias and offset to start of new frag */ nc->pfmemalloc = page_is_pfmemalloc(page); - nc->pagecnt_bias = size + 1; + nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; nc->offset = size; } @@ -4822,10 +4711,10 @@ void *page_frag_alloc(struct page_frag_cache *nc, size = nc->size; #endif /* OK, page count is 0, we can safely set it */ - set_page_count(page, size + 1); + set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); /* reset page count bias and offset to start of new frag */ - nc->pagecnt_bias = size + 1; + nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; offset = size - fragsz; } @@ -4998,7 +4887,7 @@ long si_mem_available(void) pages[lru] = global_node_page_state(NR_LRU_BASE + lru); for_each_zone(zone) - wmark_low += low_wmark_pages(zone); + wmark_low += zone->watermark[WMARK_LOW]; /* * Estimate the amount of memory available for userspace allocations, @@ -7572,13 +7461,13 @@ static void __setup_per_zone_wmarks(void) min_pages = zone->managed_pages / 1024; min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); - zone->_watermark[WMARK_MIN] = min_pages; + zone->watermark[WMARK_MIN] = min_pages; } else { /* * If it's a lowmem zone, reserve a number of pages * proportionate to the zone's size. */ - zone->_watermark[WMARK_MIN] = min; + zone->watermark[WMARK_MIN] = min; } /* @@ -7590,11 +7479,10 @@ static void __setup_per_zone_wmarks(void) mult_frac(zone->managed_pages, watermark_scale_factor, 10000)); - zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + + zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + low + min; - zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + + zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + low + min * 2; - zone->watermark_boost = 0; spin_unlock_irqrestore(&zone->lock, flags); } @@ -7695,18 +7583,6 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, return 0; } -int watermark_boost_factor_sysctl_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *length, loff_t *ppos) -{ - int rc; - - rc = proc_dointvec_minmax(table, write, buffer, length, ppos); - if (rc) - return rc; - - return 0; -} - int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { diff --git a/mm/shmem.c b/mm/shmem.c index f730e2eeda96a0a7a9eb0ffbb33f1a026150abc7..72921fb0a1728ba860901cbbf91df4569358dd83 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2368,11 +2368,11 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, lru_cache_add_anon(page); - spin_lock(&info->lock); + spin_lock_irq(&info->lock); info->alloced++; inode->i_blocks += BLOCKS_PER_PAGE; shmem_recalc_inode(inode); - spin_unlock(&info->lock); + spin_unlock_irq(&info->lock); inc_mm_counter(dst_mm, mm_counter_file(page)); page_add_file_rmap(page, false); diff --git a/mm/slub.c b/mm/slub.c index beb94f88a7d0ae26137576c8cd970a33f6ace5f1..f3014dec1a1ce81133e8635ca0cc6ba33452b675 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -260,7 +260,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr, * freepointer to be restored incorrectly. 
*/ return (void *)((unsigned long)ptr ^ s->random ^ - (unsigned long)kasan_reset_tag((void *)ptr_addr)); + swab((unsigned long)kasan_reset_tag((void *)ptr_addr))); #else return ptr; #endif diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 40026c5e2ad5d645cdffd7e9b8e2864481e0ab2c..18926369e842c4e2741011176082d84baee0227c 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -1446,6 +1447,7 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0)); } +EXPORT_SYMBOL_GPL(get_vm_area); struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, const void *caller) @@ -1676,7 +1678,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; array_size = (nr_pages * sizeof(struct page *)); - area->nr_pages = nr_pages; /* Please note that the recursion is strictly bounded. */ if (array_size > PAGE_SIZE) { pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask, @@ -1684,13 +1685,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, } else { pages = kmalloc_node(array_size, nested_gfp, node); } - area->pages = pages; - if (!area->pages) { + + if (!pages) { remove_vm_area(area->addr); kfree(area); return NULL; } + area->pages = pages; + area->nr_pages = nr_pages; + for (i = 0; i < area->nr_pages; i++) { struct page *page; @@ -2236,6 +2240,7 @@ long vwrite(char *buf, char *addr, unsigned long count) * @vma: vma to cover * @uaddr: target user address to start at * @kaddr: virtual address of vmalloc kernel memory + * @pgoff: offset from @kaddr to start at * @size: size of map area * * Returns: 0 for success, -Exxx on failure @@ -2248,9 +2253,15 @@ long vwrite(char *buf, char *addr, unsigned long count) * Similar to remap_pfn_range() (see mm/memory.c) */ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, - void *kaddr, unsigned long size) + void *kaddr, unsigned long pgoff, + unsigned long size) { struct vm_struct *area; + unsigned long off; + unsigned long end_index; + + if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) + return -EINVAL; size = PAGE_ALIGN(size); @@ -2264,8 +2275,10 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, if (!(area->flags & VM_USERMAP)) return -EINVAL; - if (kaddr + size > area->addr + get_vm_area_size(area)) + if (check_add_overflow(size, off, &end_index) || + end_index > get_vm_area_size(area)) return -EINVAL; + kaddr += off; do { struct page *page = vmalloc_to_page(kaddr); @@ -2304,7 +2317,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff) { return remap_vmalloc_range_partial(vma, vma->vm_start, - addr + (pgoff << PAGE_SHIFT), + addr, pgoff, vma->vm_end - vma->vm_start); } EXPORT_SYMBOL(remap_vmalloc_range); diff --git a/mm/vmscan.c b/mm/vmscan.c index 84d1b7488a48116eb313499ce39d3bb811e59253..32b2424a3203084a37026e03717d569e6e25d856 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -87,9 +87,6 @@ struct scan_control { /* Can pages be swapped as part of reclaim? */ unsigned int may_swap:1; - /* e.g. boosted watermark reclaim leaves slabs alone */ - unsigned int may_shrinkslab:1; - /* * Cgroups are not reclaimed below their configured memory.low, * unless we threaten to OOM. 
If any cgroups are skipped due to @@ -2742,10 +2739,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) shrink_node_memcg(pgdat, memcg, sc, &lru_pages); node_lru_pages += lru_pages; - if (sc->may_shrinkslab) { - shrink_slab(sc->gfp_mask, pgdat->node_id, + shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); - } /* Record the group's reclaim efficiency */ vmpressure(sc->gfp_mask, memcg, false, @@ -3223,7 +3218,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = 1, - .may_shrinkslab = 1, }; /* @@ -3268,7 +3262,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, .may_unmap = 1, .reclaim_idx = MAX_NR_ZONES - 1, .may_swap = !noswap, - .may_shrinkslab = 1, }; unsigned long lru_pages; @@ -3315,7 +3308,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = may_swap, - .may_shrinkslab = 1, }; /* @@ -3366,30 +3358,6 @@ static void age_active_anon(struct pglist_data *pgdat, } while (memcg); } -static bool pgdat_watermark_boosted(pg_data_t *pgdat, int classzone_idx) -{ - int i; - struct zone *zone; - - /* - * Check for watermark boosts top-down as the higher zones - * are more likely to be boosted. Both watermarks and boosts - * should not be checked at the time time as reclaim would - * start prematurely when there is no boosting and a lower - * zone is balanced. - */ - for (i = classzone_idx; i >= 0; i--) { - zone = pgdat->node_zones + i; - if (!managed_zone(zone)) - continue; - - if (zone->watermark_boost) - return true; - } - - return false; -} - /* * Returns true if there is an eligible zone balanced for the request order * and classzone_idx @@ -3400,10 +3368,6 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx) unsigned long mark = -1; struct zone *zone; - /* - * Check watermarks bottom-up as lower zones are more likely to - * meet watermarks. - */ for (i = 0; i <= classzone_idx; i++) { zone = pgdat->node_zones + i; @@ -3532,14 +3496,14 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) unsigned long nr_soft_reclaimed; unsigned long nr_soft_scanned; unsigned long pflags; - unsigned long nr_boost_reclaim; - unsigned long zone_boosts[MAX_NR_ZONES] = { 0, }; - bool boosted; struct zone *zone; struct scan_control sc = { .gfp_mask = GFP_KERNEL, .order = order, + .priority = DEF_PRIORITY, + .may_writepage = !laptop_mode, .may_unmap = 1, + .may_swap = 1, }; psi_memstall_enter(&pflags); @@ -3547,28 +3511,9 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) count_vm_event(PAGEOUTRUN); - /* - * Account for the reclaim boost. Note that the zone boost is left in - * place so that parallel allocations that are near the watermark will - * stall or direct reclaim until kswapd is finished. 
- */ - nr_boost_reclaim = 0; - for (i = 0; i <= classzone_idx; i++) { - zone = pgdat->node_zones + i; - if (!managed_zone(zone)) - continue; - - nr_boost_reclaim += zone->watermark_boost; - zone_boosts[i] = zone->watermark_boost; - } - boosted = nr_boost_reclaim; - -restart: - sc.priority = DEF_PRIORITY; do { unsigned long nr_reclaimed = sc.nr_reclaimed; bool raise_priority = true; - bool balanced; bool ret; sc.reclaim_idx = classzone_idx; @@ -3595,40 +3540,13 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) } /* - * If the pgdat is imbalanced then ignore boosting and preserve - * the watermarks for a later time and restart. Note that the - * zone watermarks will be still reset at the end of balancing - * on the grounds that the normal reclaim should be enough to - * re-evaluate if boosting is required when kswapd next wakes. - */ - balanced = pgdat_balanced(pgdat, sc.order, classzone_idx); - if (!balanced && nr_boost_reclaim) { - nr_boost_reclaim = 0; - goto restart; - } - - /* - * If boosting is not active then only reclaim if there are no - * eligible zones. Note that sc.reclaim_idx is not used as - * buffer_heads_over_limit may have adjusted it. + * Only reclaim if there are no eligible zones. Note that + * sc.reclaim_idx is not used as buffer_heads_over_limit may + * have adjusted it. */ - if (!nr_boost_reclaim && balanced) + if (pgdat_balanced(pgdat, sc.order, classzone_idx)) goto out; - /* Limit the priority of boosting to avoid reclaim writeback */ - if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) - raise_priority = false; - - /* - * Do not writeback or swap pages for boosted reclaim. The - * intent is to relieve pressure not issue sub-optimal IO - * from reclaim context. If no pages are reclaimed, the - * reclaim will be aborted. - */ - sc.may_writepage = !laptop_mode && !nr_boost_reclaim; - sc.may_swap = !nr_boost_reclaim; - sc.may_shrinkslab = !nr_boost_reclaim; - /* * Do some background aging of the anon list, to give * pages a chance to be referenced before reclaiming. All @@ -3680,16 +3598,6 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) * progress in reclaiming pages */ nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; - nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed); - - /* - * If reclaim made no progress for a boost, stop reclaim as - * IO cannot be queued and it could be an infinite loop in - * extreme circumstances. - */ - if (nr_boost_reclaim && !nr_reclaimed) - break; - if (raise_priority || !nr_reclaimed) sc.priority--; } while (sc.priority >= 1); @@ -3698,28 +3606,6 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) pgdat->kswapd_failures++; out: - /* If reclaim was boosted, account for the reclaim done in this pass */ - if (boosted) { - unsigned long flags; - - for (i = 0; i <= classzone_idx; i++) { - if (!zone_boosts[i]) - continue; - - /* Increments are under the zone lock */ - zone = pgdat->node_zones + i; - spin_lock_irqsave(&zone->lock, flags); - zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); - spin_unlock_irqrestore(&zone->lock, flags); - } - - /* - * As there is now likely space, wakeup kcompact to defragment - * pageblocks. 
- */ - wakeup_kcompactd(pgdat, pageblock_order, classzone_idx); - } - snapshot_refaults(NULL, pgdat); __fs_reclaim_release(); psi_memstall_leave(&pflags); @@ -3951,8 +3837,7 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, /* Hopeless node, leave it to direct reclaim if possible */ if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || - (pgdat_balanced(pgdat, order, classzone_idx) && - !pgdat_watermark_boosted(pgdat, classzone_idx))) { + pgdat_balanced(pgdat, order, classzone_idx)) { /* * There may be plenty of free memory available, but it's too * fragmented for high-order allocations. Wake up kcompactd diff --git a/mm/vmstat.c b/mm/vmstat.c index ac18931a50e2852064e08ad6871cf5c675815347..2ef02636021a99c99a7e62f0d8c259462045a5f5 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1168,6 +1168,9 @@ const char * const vmstat_text[] = { "nr_kernel_misc_reclaimable", "nr_unreclaimable_pages", + + "nr_ion_heap", + "nr_ion_heap_pool", /* enum writeback_stat_item counters */ "nr_dirty_threshold", "nr_dirty_background_threshold", diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index bf9ea404abe7cbe1dd2113881856cd35b718b7d1..0458de53cb64b2da51de492ffa27f33068351cc8 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -735,7 +735,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig); if (!orig_node) - return; + goto out; neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming, ethhdr->h_source); diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 34caf129a9bf5531360f798be6a7059bad26a50f..7f1be5a287575d51ffd5b4e7ecf540b8fd7de700 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -1021,15 +1021,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv, */ static u8 batadv_nc_random_weight_tq(u8 tq) { - u8 rand_val, rand_tq; - - get_random_bytes(&rand_val, sizeof(rand_val)); - /* randomize the estimated packet loss (max TQ - estimated TQ) */ - rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq); - - /* normalize the randomized packet loss */ - rand_tq /= BATADV_TQ_MAX_VALUE; + u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq); /* convert to (randomized) estimated tq again */ return BATADV_TQ_MAX_VALUE - rand_tq; diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index 09427fc6494a157554d8b19f3481a878a9f97bba..976b038e53bf934332a39bad8a5509ca1aac0add 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c @@ -1093,7 +1093,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj, ret = batadv_parse_throughput(net_dev, buff, "throughput_override", &tp_override); if (!ret) - return count; + goto out; old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override); if (old_tp_override == tp_override) @@ -1126,6 +1126,7 @@ static ssize_t batadv_show_throughput_override(struct kobject *kobj, tp_override = atomic_read(&hard_iface->bat_v.throughput_override); + batadv_hardif_put(hard_iface); return sprintf(buff, "%u.%u MBit\n", tp_override / 10, tp_override % 10); } diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 5e44d842cc5d0b87ff4172c809a933eec9ea2c47..cf0ccd05329c7dc3bf0b291c03e9aa01a4ee39f1 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -413,10 +413,8 @@ static int __rfcomm_create_dev(struct sock *sk, void __user *arg) dlc = rfcomm_dlc_exists(&req.src, 
&req.dst, req.channel); if (IS_ERR(dlc)) return PTR_ERR(dlc); - else if (dlc) { - rfcomm_dlc_put(dlc); + if (dlc) return -EBUSY; - } dlc = rfcomm_dlc_alloc(GFP_KERNEL); if (!dlc) return -ENOMEM; diff --git a/net/core/dev.c b/net/core/dev.c index a26dcc7a2789508b3c454bb3658391c013b4fe0e..765abc251ec117bf05e644cb716a0196eaaf4a2d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2854,6 +2854,8 @@ static u16 skb_tx_hash(const struct net_device *dev, if (skb_rx_queue_recorded(skb)) { hash = skb_get_rx_queue(skb); + if (hash >= qoffset) + hash -= qoffset; while (unlikely(hash >= qcount)) hash -= qcount; return hash + qoffset; @@ -3932,7 +3934,8 @@ EXPORT_SYMBOL(netdev_max_backlog); int netdev_tstamp_prequeue __read_mostly = 1; int netdev_budget __read_mostly = 300; -unsigned int __read_mostly netdev_budget_usecs = 2000; +/* Must be at least 2 jiffes to guarantee 1 jiffy timeout */ +unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ; int weight_p __read_mostly = 64; /* old backlog weight */ int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */ diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index 67feeb207dad606401d86c299c912739c0de7e28..668330ace961607c07ce98e9de0edf843c96b26e 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c @@ -131,10 +131,8 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft, cs->classid = (u32)value; css_task_iter_start(css, 0, &it); - while ((p = css_task_iter_next(&it))) { + while ((p = css_task_iter_next(&it))) update_classid_task(p, cs->classid); - cond_resched(); - } css_task_iter_end(&it); return 0; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 58a401e9cf09db8336aaa780d10f8e008e95c8a5..b438bed6749d47b9103c3c1cea1d1d7056e5c2fc 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -211,7 +211,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); dst = NULL; @@ -282,7 +282,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb) security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6)); /* sk = NULL, but it is safe for now. RST socket required. 
*/ - dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); + dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); if (!IS_ERR(dst)) { skb_dst_set(skb, dst); ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0); @@ -912,7 +912,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); final_p = fl6_update_dst(&fl6, opt, &final); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto failure; diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index 7f4534828f6c96c0b0fe99d54add4ce9be0c1877..a0494206cfda6788be9ac2486c03f32a72e35055 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c @@ -241,7 +241,7 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m) * - the key's semaphore is read-locked */ static long dns_resolver_read(const struct key *key, - char __user *buffer, size_t buflen) + char *buffer, size_t buflen) { int err = PTR_ERR(key->payload.data[dns_key_error]); diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 37708dabebd1875cac799229d7045f887a26cf8f..606bc7fe5cc7c994de77d8ecd5a1ac5900954b9c 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -64,10 +64,16 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, else multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]); - if (!data[IFLA_HSR_VERSION]) + if (!data[IFLA_HSR_VERSION]) { hsr_version = 0; - else + } else { hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]); + if (hsr_version > 1) { + NL_SET_ERR_MSG_MOD(extack, + "Only versions 0..1 are supported"); + return -EINVAL; + } + } return hsr_dev_finalize(dev, link, multicast_spec, hsr_version); } diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index fabece4b899766347291456799508bf25a60a878..a08d682ba676eeeac45edc74b57a4dc5a48d275a 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -587,12 +587,15 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, return NULL; } -static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa) +static int ip_mc_autojoin_config(struct net *net, bool join, + const struct in_ifaddr *ifa) { +#if defined(CONFIG_IP_MULTICAST) struct ip_mreqn mreq = { .imr_multiaddr.s_addr = ifa->ifa_address, .imr_ifindex = ifa->ifa_dev->dev->ifindex, }; + struct sock *sk = net->ipv4.mc_autojoin_sk; int ret; ASSERT_RTNL(); @@ -605,6 +608,9 @@ static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa) release_sock(sk); return ret; +#else + return -EOPNOTSUPP; +#endif } static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, @@ -646,7 +652,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, continue; if (ipv4_is_multicast(ifa->ifa_address)) - ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa); + ip_mc_autojoin_config(net, false, ifa); __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid); return 0; } @@ -907,8 +913,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, */ set_ifa_lifetime(ifa, valid_lft, prefered_lft); if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) { - int ret = ip_mc_config(net->ipv4.mc_autojoin_sk, - true, ifa); + int ret = ip_mc_autojoin_config(net, true, ifa); if (ret < 0) { inet_free_ifa(ifa); diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 3955a6d7ea6621d895fab82042a773f263c6bcc7..3047fc4737c4d848b5fc5cff46adee5781ef05f1 
100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -2337,6 +2337,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v) " %zd bytes, size of tnode: %zd bytes.\n", LEAF_SIZE, TNODE_SIZE(0)); + rcu_read_lock(); for (h = 0; h < FIB_TABLE_HASHSZ; h++) { struct hlist_head *head = &net->ipv4.fib_table_hash[h]; struct fib_table *tb; @@ -2356,7 +2357,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v) trie_show_usage(seq, t->stats); #endif } + cond_resched_rcu(); } + rcu_read_unlock(); return 0; } diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 14fd8a37a72950dd8d25928d20f277fe9ff0df40..b37abba3b369e0b20c1fc4e1bb88f1d22845c6f0 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -155,11 +155,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, cand = t; } - if (flags & TUNNEL_NO_KEY) - goto skip_key_lookup; - hlist_for_each_entry_rcu(t, head, hash_node) { - if (t->parms.i_key != key || + if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) || t->parms.iph.saddr != 0 || t->parms.iph.daddr != 0 || !(t->dev->flags & IFF_UP)) @@ -171,7 +168,6 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, cand = t; } -skip_key_lookup: if (cand) return cand; diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index ccb1d97dfa050956f905460cc036554e35e651d9..d4c4eabd02b65095c08696388ea2d69829b15e56 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -677,10 +677,8 @@ static int __init vti_init(void) msg = "ipip tunnel"; err = xfrm4_tunnel_register(&ipip_handler, AF_INET); - if (err < 0) { - pr_info("%s: cant't register tunnel\n",__func__); + if (err < 0) goto xfrm_tunnel_failed; - } msg = "netlink interface"; err = rtnl_link_register(&vti_link_ops); diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index be980c195fc55b0a23f144eb55641cb16a2eb2de..510d2ec4c76ac5acbd3f86332329e034d2a58fda 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -77,9 +77,7 @@ int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb) { memset(IPCB(skb), 0, sizeof(*IPCB(skb))); -#ifdef CONFIG_NETFILTER IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; -#endif return xfrm_output(sk, skb); } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 6c90d423062be35b05306ef1455b4529ca87e935..822a1141c8889ef3e6817b17b0d67cc110668e8d 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3263,6 +3263,10 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route) if (netif_is_l3_master(idev->dev)) return; + /* no link local addresses on devices flagged as slaves */ + if (idev->dev->flags & IFF_SLAVE) + return; + ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); switch (idev->cnf.addr_gen_mode) { diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index 5cd0029d930e2d2cd940cd1c618e5d5b81b952cc..66a1a0eb2ed05f1e9d49e367c168de73812131f1 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -127,11 +127,12 @@ int inet6addr_validator_notifier_call_chain(unsigned long val, void *v) } EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain); -static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1, - struct dst_entry **u2, - struct flowi6 *u3) +static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net, + const struct sock *sk, + struct flowi6 *fl6, + const struct in6_addr *final_dst) { - return -EAFNOSUPPORT; + return ERR_PTR(-EAFNOSUPPORT); } static struct fib6_table *eafnosupport_fib6_get_table(struct net *net, u32 id) @@ -169,7 +170,7 
@@ eafnosupport_ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr, } const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) { - .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup, + .ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow, .fib6_get_table = eafnosupport_fib6_get_table, .fib6_table_lookup = eafnosupport_fib6_table_lookup, .fib6_lookup = eafnosupport_fib6_lookup, diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 79fcd9550fd2e83d9cdedb8b17da60895c0a9e55..5c2351deedc8f4de7cc2f1632dcc5c4c8f52e9e8 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -740,7 +740,7 @@ int inet6_sk_rebuild_header(struct sock *sk) &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { sk->sk_route_caps = 0; sk->sk_err_soft = -PTR_ERR(dst); @@ -904,7 +904,7 @@ static struct pernet_operations inet6_net_ops = { static const struct ipv6_stub ipv6_stub_impl = { .ipv6_sock_mc_join = ipv6_sock_mc_join, .ipv6_sock_mc_drop = ipv6_sock_mc_drop, - .ipv6_dst_lookup = ip6_dst_lookup, + .ipv6_dst_lookup_flow = ip6_dst_lookup_flow, .fib6_get_table = fib6_get_table, .fib6_table_lookup = fib6_table_lookup, .fib6_lookup = fib6_lookup, diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 971a0fdf1fbc3bc13df69d882c49fd5030b35b4f..727f958dd869542ab31b3dd76908537446505245 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -89,7 +89,7 @@ int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr) final_p = fl6_update_dst(&fl6, opt, &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto out; diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 890adadcda16aee20400d7dc394931fde8797c66..92fe9e565da0b685468a82f349ab4b1887042f1e 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -52,7 +52,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk, fl6->flowi6_uid = sk->sk_uid; security_req_classify_flow(req, flowi6_to_flowi(fl6)); - dst = ip6_dst_lookup_flow(sk, fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (IS_ERR(dst)) return NULL; @@ -107,7 +107,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk, dst = __inet6_csk_dst_check(sk, np->dst_cookie); if (!dst) { - dst = ip6_dst_lookup_flow(sk, fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (!IS_ERR(dst)) ip6_dst_store(sk, dst, NULL, NULL); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 9886a84c25117fb9ec8ad04f3764ebb4424ef3f3..22665e3638ac4486139f2a65cb5d82352d22e63e 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1071,19 +1071,19 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup); * It returns a valid dst pointer on success, or a pointer encoded * error code. 
*/ -struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, +struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst) { struct dst_entry *dst = NULL; int err; - err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6); + err = ip6_dst_lookup_tail(net, sk, &dst, fl6); if (err) return ERR_PTR(err); if (final_dst) fl6->daddr = *final_dst; - return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); + return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0); } EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); @@ -1115,7 +1115,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, if (dst) return dst; - dst = ip6_dst_lookup_flow(sk, fl6, final_dst); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst); if (connected && !IS_ERR(dst)) ip6_sk_dst_store_flow(sk, dst_clone(dst), fl6); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index a20be08f0e0bf07843c920ca98642fb79c02707d..231c489128e47a9d03e7b445e2087778b6c70183 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -185,15 +185,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, retv = -EBUSY; break; } - } else if (sk->sk_protocol == IPPROTO_TCP) { - if (sk->sk_prot != &tcpv6_prot) { - retv = -EBUSY; - break; - } - break; - } else { + } + if (sk->sk_protocol == IPPROTO_TCP && + sk->sk_prot != &tcpv6_prot) { + retv = -EBUSY; break; } + if (sk->sk_protocol != IPPROTO_TCP) + break; if (sk->sk_state != TCP_ESTABLISHED) { retv = -ENOTCONN; break; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index a41156a00dd441be2f9b8ac2faa18ca631e71b64..8d19729f851629f50f0387b9862540ee25bee980 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -928,7 +928,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto out; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index e997141aed8c059c40e6e5f0229e256317a8b483..a377be8a9fb4446f2e1e08966790c122d094f02e 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -240,7 +240,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) fl6.flowi6_uid = sk->sk_uid; security_req_classify_flow(req, flowi6_to_flowi(&fl6)); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) goto out_free; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 7b0c2498f461b14f62d483de6beb1327671ad8ce..2e76ebfdc907dde33475310b42fac7fd56eee3a8 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -268,7 +268,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto failure; @@ -885,7 +885,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 * Underlying function will use this to retrieve the network * namespace */ - dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); + dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); if (!IS_ERR(dst)) { skb_dst_set(buff, dst); ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass); diff 
--git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 6a74080005cf6acf15fa59d6d3dd14cbf01a1781..71d022704923c4ddf6fd3f7e9cc6b0e221149f7e 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -130,9 +130,7 @@ int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb) { memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); -#ifdef CONFIG_NETFILTER IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; -#endif return xfrm_output(sk, skb); } diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 37a69df17cab982e427e158966b8059886ddd456..2f28f9910b92e14f1dc7f9c2950f302727369354 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -619,7 +619,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); - dst = ip6_dst_lookup_flow(sk, &fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto out; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 93f72a4c36f9ec0ea8f7775ec1999ab192b3efbc..8028b03bfa7b7c65ef669a47b7a1c0d8d843d933 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2384,7 +2384,7 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, if (!ieee80211_is_data(hdr->frame_control)) return; - if (ieee80211_is_nullfunc(hdr->frame_control) && + if (ieee80211_is_any_nullfunc(hdr->frame_control) && sdata->u.mgd.probe_send_count > 0) { if (ack) ieee80211_sta_reset_conn_monitor(sdata); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index c7c456c86b0d318cf7acde90d17dfaf4fc62921a..c17e148e06e71677e37bec277b4234a869f833f8 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1373,8 +1373,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) return RX_CONTINUE; if (ieee80211_is_ctl(hdr->frame_control) || - ieee80211_is_nullfunc(hdr->frame_control) || - ieee80211_is_qos_nullfunc(hdr->frame_control) || + ieee80211_is_any_nullfunc(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) return RX_CONTINUE; @@ -1753,8 +1752,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) * Drop (qos-)data::nullfunc frames silently, since they * are used only to control station power saving mode. */ - if (ieee80211_is_nullfunc(hdr->frame_control) || - ieee80211_is_qos_nullfunc(hdr->frame_control)) { + if (ieee80211_is_any_nullfunc(hdr->frame_control)) { I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); /* @@ -2244,7 +2242,7 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) /* Drop unencrypted frames if key is set. 
*/ if (unlikely(!ieee80211_has_protected(fc) && - !ieee80211_is_nullfunc(fc) && + !ieee80211_is_any_nullfunc(fc) && ieee80211_is_data(fc) && rx->key)) return -EACCES; diff --git a/net/mac80211/status.c b/net/mac80211/status.c index d5db1c4e829bf8e0409fb8d6097e19ee20cbec7c..32f655884b46776caf45058f5a6a20b2aaeee788 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -491,8 +491,7 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local, rcu_read_lock(); sdata = ieee80211_sdata_from_skb(local, skb); if (sdata) { - if (ieee80211_is_nullfunc(hdr->frame_control) || - ieee80211_is_qos_nullfunc(hdr->frame_control)) + if (ieee80211_is_any_nullfunc(hdr->frame_control)) cfg80211_probe_status(sdata->dev, hdr->addr1, cookie, acked, info->status.ack_signal, @@ -871,7 +870,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw, I802_DEBUG_INC(local->dot11FailedCount); } - if ((ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && + if (ieee80211_is_any_nullfunc(fc) && ieee80211_has_pm(fc) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) && !(info->flags & IEEE80211_TX_CTL_INJECTED) && diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 84639363173bd937fe02ace34c836acf94b8726c..3160ffd93a153abfc265efa4fd67c00c57e9e5f8 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -300,7 +300,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) && test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) && !ieee80211_is_probe_req(hdr->frame_control) && - !ieee80211_is_nullfunc(hdr->frame_control)) + !ieee80211_is_any_nullfunc(hdr->frame_control)) /* * When software scanning only nullfunc frames (to notify * the sleep state to the AP) and probe requests (for the diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index d5a4db5b3fe7bef8591eae46d23e2a67a91e1965..7623d9aec6364386dfbf69e9e3d8ddbc41d55738 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -618,16 +618,15 @@ static struct net_device *inet6_fib_lookup_dev(struct net *net, struct net_device *dev; struct dst_entry *dst; struct flowi6 fl6; - int err; if (!ipv6_stub) return ERR_PTR(-EAFNOSUPPORT); memset(&fl6, 0, sizeof(fl6)); memcpy(&fl6.daddr, addr, sizeof(struct in6_addr)); - err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6); - if (err) - return ERR_PTR(err); + dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL); + if (IS_ERR(dst)) + return ERR_CAST(dst); dev = dst->dev; dev_hold(dev); diff --git a/net/netfilter/nf_nat_proto_udp.c b/net/netfilter/nf_nat_proto_udp.c index 5790f70a83b28154490a2a774f8a0f7288a38414..d85c31c2433cf3c302d6fe24b8da2a673edd8298 100644 --- a/net/netfilter/nf_nat_proto_udp.c +++ b/net/netfilter/nf_nat_proto_udp.c @@ -66,15 +66,14 @@ static bool udp_manip_pkt(struct sk_buff *skb, enum nf_nat_manip_type maniptype) { struct udphdr *hdr; - bool do_csum; if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) return false; hdr = (struct udphdr *)(skb->data + hdroff); - do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL; + __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, + !!hdr->check); - __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, do_csum); return true; } diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 5881f66688171dee4a5bb4a039a1a28f9cea59f6..1b8a53081632ffab249737a89af59ef3fe2906da 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3450,7 +3450,7 @@ static int nf_tables_newset(struct 
net *net, struct sock *nlsk, NFT_SET_INTERVAL | NFT_SET_TIMEOUT | NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT)) - return -EINVAL; + return -EOPNOTSUPP; /* Only one of these operations is supported */ if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) == (NFT_SET_MAP | NFT_SET_OBJECT)) @@ -3488,7 +3488,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE])); if (objtype == NFT_OBJECT_UNSPEC || objtype > NFT_OBJECT_MAX) - return -EINVAL; + return -EOPNOTSUPP; } else if (flags & NFT_SET_OBJECT) return -EINVAL; else diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c index b0bc130947c9426543f374530f9bbd2484da6e9e..131f9f8c0b0925c94d2f11092a57685e45289437 100644 --- a/net/netfilter/nfnetlink_osf.c +++ b/net/netfilter/nfnetlink_osf.c @@ -170,12 +170,12 @@ static bool nf_osf_match_one(const struct sk_buff *skb, static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx, const struct sk_buff *skb, const struct iphdr *ip, - unsigned char *opts) + unsigned char *opts, + struct tcphdr *_tcph) { const struct tcphdr *tcp; - struct tcphdr _tcph; - tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph); + tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph); if (!tcp) return NULL; @@ -210,10 +210,11 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family, int fmatch = FMATCH_WRONG; struct nf_osf_hdr_ctx ctx; const struct tcphdr *tcp; + struct tcphdr _tcph; memset(&ctx, 0, sizeof(ctx)); - tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts); + tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph); if (!tcp) return false; @@ -270,10 +271,11 @@ const char *nf_osf_find(const struct sk_buff *skb, struct nf_osf_hdr_ctx ctx; const struct tcphdr *tcp; const char *genre = NULL; + struct tcphdr _tcph; memset(&ctx, 0, sizeof(ctx)); - tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts); + tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph); if (!tcp) return NULL; diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index b76aa668a94bce6c6d1280d5cbf307d6ce94e013..53ced34a1fdd212dbc2d6b31a5a9bf2a66b37737 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c @@ -211,6 +211,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic, /* refcount initialized at 1 */ spin_unlock_bh(&nr_node_list_lock); + nr_neigh_put(nr_neigh); return 0; } nr_node_lock(nr_node); diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 82d38f93c81a2bb5266819561bd2e4bfa21be3ff..518327dccb3cf0534a7ff906522e654aa0034c35 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -769,20 +769,21 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) node = NULL; if (addr->sq_node == QRTR_NODE_BCAST) { - enqueue_fn = qrtr_bcast_enqueue; - if (addr->sq_port != QRTR_PORT_CTRL) { + if (addr->sq_port != QRTR_PORT_CTRL && + qrtr_local_nid != QRTR_NODE_BCAST) { release_sock(sk); return -ENOTCONN; } + enqueue_fn = qrtr_bcast_enqueue; } else if (addr->sq_node == ipc->us.sq_node) { enqueue_fn = qrtr_local_enqueue; } else { - enqueue_fn = qrtr_node_enqueue; node = qrtr_node_lookup(addr->sq_node); if (!node) { release_sock(sk); return -ECONNRESET; } + enqueue_fn = qrtr_node_enqueue; } plen = (len + 3) & ~3; diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index e7f6b8823eb6e6c6794ed78e1197c1cf8aab4858..ad9d1b21cb0ba4ead61569fec8a125e30576e1c7 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c @@ -35,7 +35,7 @@ static void rxrpc_free_preparse_s(struct 
key_preparsed_payload *); static void rxrpc_destroy(struct key *); static void rxrpc_destroy_s(struct key *); static void rxrpc_describe(const struct key *, struct seq_file *); -static long rxrpc_read(const struct key *, char __user *, size_t); +static long rxrpc_read(const struct key *, char *, size_t); /* * rxrpc defined keys take an arbitrary string as the description and an @@ -1044,12 +1044,12 @@ EXPORT_SYMBOL(rxrpc_get_null_key); * - this returns the result in XDR form */ static long rxrpc_read(const struct key *key, - char __user *buffer, size_t buflen) + char *buffer, size_t buflen) { const struct rxrpc_key_token *token; const struct krb5_principal *princ; size_t size; - __be32 __user *xdr, *oldxdr; + __be32 *xdr, *oldxdr; u32 cnlen, toksize, ntoks, tok, zero; u16 toksizes[AFSTOKEN_MAX]; int loop; @@ -1126,30 +1126,25 @@ static long rxrpc_read(const struct key *key, if (!buffer || buflen < size) return size; - xdr = (__be32 __user *) buffer; + xdr = (__be32 *)buffer; zero = 0; #define ENCODE(x) \ do { \ - __be32 y = htonl(x); \ - if (put_user(y, xdr++) < 0) \ - goto fault; \ + *xdr++ = htonl(x); \ } while(0) #define ENCODE_DATA(l, s) \ do { \ u32 _l = (l); \ ENCODE(l); \ - if (copy_to_user(xdr, (s), _l) != 0) \ - goto fault; \ - if (_l & 3 && \ - copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \ - goto fault; \ + memcpy(xdr, (s), _l); \ + if (_l & 3) \ + memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ xdr += (_l + 3) >> 2; \ } while(0) #define ENCODE64(x) \ do { \ __be64 y = cpu_to_be64(x); \ - if (copy_to_user(xdr, &y, 8) != 0) \ - goto fault; \ + memcpy(xdr, &y, 8); \ xdr += 8 >> 2; \ } while(0) #define ENCODE_STR(s) \ @@ -1240,8 +1235,4 @@ static long rxrpc_read(const struct key *key, ASSERTCMP((char __user *) xdr - buffer, ==, size); _leave(" = %zu", size); return size; - -fault: - _leave(" = -EFAULT"); - return -EFAULT; } diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 4c0087a48e8726919c04b8fa92d3d591905d1101..fe190a6918727891a0830ec6c812881d6ea428d4 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -169,15 +169,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) goto error; } - /* we want to set the don't fragment bit */ - opt = IPV6_PMTUDISC_DO; - ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER, - (char *) &opt, sizeof(opt)); - if (ret < 0) { - _debug("setsockopt failed"); - goto error; - } - /* Fall through and set IPv4 options too otherwise we don't get * errors from IPv4 packets sent through the IPv6 socket. 
*/ diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index b0aa08e3796de1909563f7448de224cded2a7f73..da8a555ec3f628117ad9f67b51186e7241b419fd 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -480,41 +480,21 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, skb->tstamp = ktime_get_real(); switch (conn->params.local->srx.transport.family) { + case AF_INET6: case AF_INET: opt = IP_PMTUDISC_DONT; - ret = kernel_setsockopt(conn->params.local->socket, - SOL_IP, IP_MTU_DISCOVER, - (char *)&opt, sizeof(opt)); - if (ret == 0) { - ret = kernel_sendmsg(conn->params.local->socket, &msg, - iov, 2, len); - conn->params.peer->last_tx_at = ktime_get_seconds(); - - opt = IP_PMTUDISC_DO; - kernel_setsockopt(conn->params.local->socket, SOL_IP, - IP_MTU_DISCOVER, - (char *)&opt, sizeof(opt)); - } - break; - -#ifdef CONFIG_AF_RXRPC_IPV6 - case AF_INET6: - opt = IPV6_PMTUDISC_DONT; - ret = kernel_setsockopt(conn->params.local->socket, - SOL_IPV6, IPV6_MTU_DISCOVER, - (char *)&opt, sizeof(opt)); - if (ret == 0) { - ret = kernel_sendmsg(conn->params.local->socket, &msg, - iov, 2, len); - conn->params.peer->last_tx_at = ktime_get_seconds(); - - opt = IPV6_PMTUDISC_DO; - kernel_setsockopt(conn->params.local->socket, - SOL_IPV6, IPV6_MTU_DISCOVER, - (char *)&opt, sizeof(opt)); - } + kernel_setsockopt(conn->params.local->socket, + SOL_IP, IP_MTU_DISCOVER, + (char *)&opt, sizeof(opt)); + ret = kernel_sendmsg(conn->params.local->socket, &msg, + iov, 2, len); + conn->params.peer->last_tx_at = ktime_get_seconds(); + + opt = IP_PMTUDISC_DO; + kernel_setsockopt(conn->params.local->socket, + SOL_IP, IP_MTU_DISCOVER, + (char *)&opt, sizeof(opt)); break; -#endif default: BUG(); diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 3e54ead1e921ad6f98c6daa9634f8f7a5f8ab700..250d3dae8af4d6cbad1576f47a97235b3ea8fa7c 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -62,8 +62,8 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx, rtt = READ_ONCE(call->peer->rtt); rtt2 = nsecs_to_jiffies64(rtt) * 2; - if (rtt2 < 1) - rtt2 = 1; + if (rtt2 < 2) + rtt2 = 2; timeout = rtt2; tx_start = READ_ONCE(call->tx_hard_ack); diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index eafc0d17d1748cf01f33d206cb46dde6ed81116c..63bfceeb8e3cd184425090c6557ff8051ec108e0 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -327,7 +327,8 @@ static void choke_reset(struct Qdisc *sch) sch->q.qlen = 0; sch->qstats.backlog = 0; - memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); + if (q->tab) + memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); q->head = q->tail = 0; red_restart(&q->vars); } diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c index 1538d6fa81652023eb774c9c603ec791e87a3de3..2278f3d420cd2938d570d783a3da5ee3fa18911a 100644 --- a/net/sched/sch_etf.c +++ b/net/sched/sch_etf.c @@ -77,7 +77,7 @@ static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb) struct sock *sk = nskb->sk; ktime_t now; - if (!sk) + if (!sk || !sk_fullsock(sk)) return false; if (!sock_flag(sk, SOCK_TXTIME)) @@ -129,8 +129,9 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code) struct sock_exterr_skb *serr; struct sk_buff *clone; ktime_t txtime = skb->tstamp; + struct sock *sk = skb->sk; - if (!skb->sk || !(skb->sk->sk_txtime_report_errors)) + if (!sk || !sk_fullsock(sk) || !(sk->sk_txtime_report_errors)) return; clone = skb_clone(skb, GFP_ATOMIC); @@ -146,7 +147,7 @@ static void report_sock_error(struct sk_buff *skb, u32 
err, u8 code) serr->ee.ee_data = (txtime >> 32); /* high part of tstamp */ serr->ee.ee_info = txtime; /* low part of tstamp */ - if (sock_queue_err_skb(skb->sk, clone)) + if (sock_queue_err_skb(sk, clone)) kfree_skb(clone); } diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 137692cb8b4f9b40f6d328ce370fe4b62a560e5b..a862d9990be741456e1a1fd072185bc3ef8d7b4a 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -429,7 +429,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]) - q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); + q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); if (tb[TCA_FQ_CODEL_MEMORY_LIMIT]) q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT])); diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index d483d6ba59b72feab4ac51dd86f63f2647da2062..b89cf0971d3dd943f59fb110ccf209e7cc6580d1 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -641,6 +641,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) if (ctl->divisor && (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536)) return -EINVAL; + + /* slot->allot is a short, make sure quantum is not too big. */ + if (ctl->quantum) { + unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum); + + if (scaled <= 0 || scaled > SHRT_MAX) + return -EINVAL; + } + if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, ctl_v1->Wlog)) return -EINVAL; diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c index 52c0b6d8f1d79666027bc93023d61ac772ae5161..3d9de52849bc5d919a6aca8ae8b54fbf30a28e81 100644 --- a/net/sched/sch_skbprio.c +++ b/net/sched/sch_skbprio.c @@ -173,6 +173,9 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt, { struct tc_skbprio_qopt *ctl = nla_data(opt); + if (opt->nla_len != nla_attr_size(sizeof(*ctl))) + return -EINVAL; + sch->limit = ctl->limit; return 0; } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 4fede55b9010c9a050d51c7b9e776331a539dfb0..736d8ca9821bc8fd90423f6ac72c5980ff38d7be 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -240,7 +240,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, { struct sctp_association *asoc = t->asoc; struct dst_entry *dst = NULL; - struct flowi6 *fl6 = &fl->u.ip6; + struct flowi _fl; + struct flowi6 *fl6 = &_fl.u.ip6; struct sctp_bind_addr *bp; struct ipv6_pinfo *np = inet6_sk(sk); struct sctp_sockaddr_entry *laddr; @@ -250,7 +251,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, enum sctp_scope scope; __u8 matchlen = 0; - memset(fl6, 0, sizeof(struct flowi6)); + memset(&_fl, 0, sizeof(_fl)); fl6->daddr = daddr->v6.sin6_addr; fl6->fl6_dport = daddr->v6.sin6_port; fl6->flowi6_proto = IPPROTO_SCTP; @@ -287,9 +288,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, fl6, final_p); - if (!asoc || saddr) + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); + if (!asoc || saddr) { + t->dst = dst; + memcpy(fl, &_fl, sizeof(_fl)); goto out; + } bp = &asoc->base.bind_addr; scope = sctp_scope(daddr); @@ -312,6 +316,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if ((laddr->a.sa.sa_family == AF_INET6) && (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) 
{ rcu_read_unlock(); + t->dst = dst; + memcpy(fl, &_fl, sizeof(_fl)); goto out; } } @@ -340,7 +346,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, fl6->saddr = laddr->a.v6.sin6_addr; fl6->fl6_sport = laddr->a.v6.sin6_port; final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); - bdst = ip6_dst_lookup_flow(sk, fl6, final_p); + bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (IS_ERR(bdst)) continue; @@ -350,6 +356,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if (!IS_ERR_OR_NULL(dst)) dst_release(dst); dst = bdst; + t->dst = dst; + memcpy(fl, &_fl, sizeof(_fl)); break; } @@ -363,6 +371,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, dst_release(dst); dst = bdst; matchlen = bmatchlen; + t->dst = dst; + memcpy(fl, &_fl, sizeof(_fl)); } rcu_read_unlock(); @@ -371,14 +381,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, struct rt6_info *rt; rt = (struct rt6_info *)dst; - t->dst = dst; t->dst_cookie = rt6_get_cookie(rt); pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n", &rt->rt6i_dst.addr, rt->rt6i_dst.plen, - &fl6->saddr); + &fl->u.ip6.saddr); } else { t->dst = NULL; - pr_debug("no route\n"); } } diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 269b528e50b9ba826ac7b14f2f5f4210b76c4bef..787c59d798f4bf73d78a20895ab2188542139423 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -424,7 +424,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, { struct sctp_association *asoc = t->asoc; struct rtable *rt; - struct flowi4 *fl4 = &fl->u.ip4; + struct flowi _fl; + struct flowi4 *fl4 = &_fl.u.ip4; struct sctp_bind_addr *bp; struct sctp_sockaddr_entry *laddr; struct dst_entry *dst = NULL; @@ -434,7 +435,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if (t->dscp & SCTP_DSCP_SET_MASK) tos = t->dscp & SCTP_DSCP_VAL_MASK; - memset(fl4, 0x0, sizeof(struct flowi4)); + memset(&_fl, 0x0, sizeof(_fl)); fl4->daddr = daddr->v4.sin_addr.s_addr; fl4->fl4_dport = daddr->v4.sin_port; fl4->flowi4_proto = IPPROTO_SCTP; @@ -453,8 +454,11 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, &fl4->saddr); rt = ip_route_output_key(sock_net(sk), fl4); - if (!IS_ERR(rt)) + if (!IS_ERR(rt)) { dst = &rt->dst; + t->dst = dst; + memcpy(fl, &_fl, sizeof(_fl)); + } /* If there is no association or if a source address is passed, no * more validation is required. @@ -517,27 +521,33 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr, false); if (!odev || odev->ifindex != fl4->flowi4_oif) { - if (!dst) + if (!dst) { dst = &rt->dst; - else + t->dst = dst; + memcpy(fl, &_fl, sizeof(_fl)); + } else { dst_release(&rt->dst); + } continue; } dst_release(dst); dst = &rt->dst; + t->dst = dst; + memcpy(fl, &_fl, sizeof(_fl)); break; } out_unlock: rcu_read_unlock(); out: - t->dst = dst; - if (dst) + if (dst) { pr_debug("rt_dst:%pI4, rt_src:%pI4\n", - &fl4->daddr, &fl4->saddr); - else + &fl->u.ip4.daddr, &fl->u.ip4.saddr); + } else { + t->dst = NULL; pr_debug("no route\n"); + } } /* For v4, the source address is cached in the route entry(dst). 
So no need diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index fb546b2d67ca85fddc357229b2771f5b487924f5..ce6053be60bc8975ded5bf7449a6138d7780233d 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -871,7 +871,11 @@ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc, struct sctp_chunk *retval; __u32 ctsn; - ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); + if (chunk && chunk->asoc) + ctsn = sctp_tsnmap_get_ctsn(&chunk->asoc->peer.tsn_map); + else + ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); + shut.cum_tsn_ack = htonl(ctsn); retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0, diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 9f4d325f3a79dea54da35c12cc2a53ac42710959..c437ae93b5a95aaec5dfcab03ca914a5b62da75d 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1880,7 +1880,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_a( */ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, - SCTP_ST_CHUNK(0), NULL, + SCTP_ST_CHUNK(0), repl, commands); } else { sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, @@ -5483,7 +5483,7 @@ enum sctp_disposition sctp_sf_do_9_2_start_shutdown( * in the Cumulative TSN Ack field the last sequential TSN it * has received from the peer. */ - reply = sctp_make_shutdown(asoc, NULL); + reply = sctp_make_shutdown(asoc, arg); if (!reply) goto nomem; @@ -6081,7 +6081,7 @@ enum sctp_disposition sctp_sf_autoclose_timer_expire( disposition = SCTP_DISPOSITION_CONSUME; if (sctp_outq_is_empty(&asoc->outqueue)) { disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type, - arg, commands); + NULL, commands); } return disposition; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 95f9068b8549721f922bb31a80e7fd7b2d52d9e7..c93be3ba5df2989ed41e62a904119a983c70c1c3 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -165,29 +165,44 @@ static void sctp_clear_owner_w(struct sctp_chunk *chunk) skb_orphan(chunk->skb); } +#define traverse_and_process() \ +do { \ + msg = chunk->msg; \ + if (msg == prev_msg) \ + continue; \ + list_for_each_entry(c, &msg->chunks, frag_list) { \ + if ((clear && asoc->base.sk == c->skb->sk) || \ + (!clear && asoc->base.sk != c->skb->sk)) \ + cb(c); \ + } \ + prev_msg = msg; \ +} while (0) + static void sctp_for_each_tx_datachunk(struct sctp_association *asoc, + bool clear, void (*cb)(struct sctp_chunk *)) { + struct sctp_datamsg *msg, *prev_msg = NULL; struct sctp_outq *q = &asoc->outqueue; + struct sctp_chunk *chunk, *c; struct sctp_transport *t; - struct sctp_chunk *chunk; list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) list_for_each_entry(chunk, &t->transmitted, transmitted_list) - cb(chunk); + traverse_and_process(); list_for_each_entry(chunk, &q->retransmit, transmitted_list) - cb(chunk); + traverse_and_process(); list_for_each_entry(chunk, &q->sacked, transmitted_list) - cb(chunk); + traverse_and_process(); list_for_each_entry(chunk, &q->abandoned, transmitted_list) - cb(chunk); + traverse_and_process(); list_for_each_entry(chunk, &q->out_chunk_list, list) - cb(chunk); + traverse_and_process(); } static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk, @@ -8899,9 +8914,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, * paths won't try to lock it and then oldsk. 
*/ lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); - sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w); + sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w); sctp_assoc_migrate(assoc, newsk); - sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w); + sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w); /* If the association on the newsk is already closed before accept() * is called, set RCV_SHUTDOWN flag. diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 6cf0fd37cbf001c54ef77bc5a8a792691d1e1ba2..c8ee8e801edb847e7869243c4fc64f5986931e25 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -878,9 +878,6 @@ int svc_send(struct svc_rqst *rqstp) if (!xprt) goto out; - /* release the receive skb before sending the reply */ - xprt->xpt_ops->xpo_release_rqst(rqstp); - /* calculate over-all length */ xb = &rqstp->rq_res; xb->len = xb->head[0].iov_len + diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 8566531c2f10f215534aac95e1fff6a255f985b2..d0b5a1c47a32445801b2931156c9057c49ae4cdf 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -636,6 +636,8 @@ svc_udp_sendto(struct svc_rqst *rqstp) { int error; + svc_release_udp_skb(rqstp); + error = svc_sendto(rqstp, &rqstp->rq_res); if (error == -ECONNREFUSED) /* ICMP error on earlier request. */ @@ -1173,6 +1175,8 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp) int sent; __be32 reclen; + svc_release_skb(rqstp); + /* Set up the first element of the reply kvec. * Any other kvecs that may be in use have been taken * care of by the server implementation itself. diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index b24d5b8f2feeda5a2d5d66d755694475a6003cf7..16c8174658fd1545be721ac519d851ff1abf8cf4 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -226,6 +226,26 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, svc_rdma_recv_ctxt_destroy(rdma, ctxt); } +/** + * svc_rdma_release_rqst - Release transport-specific per-rqst resources + * @rqstp: svc_rqst being released + * + * Ensure that the recv_ctxt is released whether or not a Reply + * was sent. For example, the client could close the connection, + * or svc_process could drop an RPC, before the Reply is sent. 
+ */ +void svc_rdma_release_rqst(struct svc_rqst *rqstp) +{ + struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt; + struct svc_xprt *xprt = rqstp->rq_xprt; + struct svcxprt_rdma *rdma = + container_of(xprt, struct svcxprt_rdma, sc_xprt); + + rqstp->rq_xprt_ctxt = NULL; + if (ctxt) + svc_rdma_recv_ctxt_put(rdma, ctxt); +} + static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma, struct svc_rdma_recv_ctxt *ctxt) { @@ -704,6 +724,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) __be32 *p; int ret; + rqstp->rq_xprt_ctxt = NULL; + spin_lock(&rdma_xprt->sc_rq_dto_lock); ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q); if (ctxt) { diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index dc1951759a8eff65dd8b0f315af692e2e59a5def..4fc0ce12708949443425cf5831170fd587998fcc 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -331,8 +331,6 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc) if (atomic_sub_return(cc->cc_sqecount, &rdma->sc_sq_avail) > 0) { ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr); - trace_svcrdma_post_rw(&cc->cc_cqe, - cc->cc_sqecount, ret); if (ret) break; return 0; @@ -345,6 +343,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc) trace_svcrdma_sq_retry(rdma); } while (1); + trace_svcrdma_sq_post_err(rdma, ret); set_bit(XPT_CLOSE, &xprt->xpt_flags); /* If even one was posted, there will be a completion. */ diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index e8ad7ddf347ad2e3af86d7e06358e9ac85137023..aa4d19a780d78e3db429647eb52b1c1c009f59f8 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -310,15 +310,17 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) } svc_xprt_get(&rdma->sc_xprt); + trace_svcrdma_post_send(wr); ret = ib_post_send(rdma->sc_qp, wr, NULL); - trace_svcrdma_post_send(wr, ret); - if (ret) { - set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); - svc_xprt_put(&rdma->sc_xprt); - wake_up(&rdma->sc_send_wait); - } - break; + if (ret) + break; + return 0; } + + trace_svcrdma_sq_post_err(rdma, ret); + set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); + svc_xprt_put(&rdma->sc_xprt); + wake_up(&rdma->sc_send_wait); return ret; } @@ -906,12 +908,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) wr_lst, rp_ch); if (ret < 0) goto err1; - ret = 0; - -out: - rqstp->rq_xprt_ctxt = NULL; - svc_rdma_recv_ctxt_put(rdma, rctxt); - return ret; + return 0; err2: if (ret != -E2BIG && ret != -EINVAL) @@ -920,14 +917,12 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp); if (ret < 0) goto err1; - ret = 0; - goto out; + return 0; err1: svc_rdma_send_ctxt_put(rdma, sctxt); err0: trace_svcrdma_send_failed(rqstp, ret); set_bit(XPT_CLOSE, &xprt->xpt_flags); - ret = -ENOTCONN; - goto out; + return -ENOTCONN; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 7308992b7a1844f05bad81aa14ee1d33ae929a96..f1824303e076e0817a4eeb37fef3894beb23fa1f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -71,7 +71,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, struct sockaddr *sa, int salen, int flags); static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt); -static void svc_rdma_release_rqst(struct svc_rqst *); static void svc_rdma_detach(struct svc_xprt *xprt); static void svc_rdma_free(struct 
svc_xprt *xprt); static int svc_rdma_has_wspace(struct svc_xprt *xprt); @@ -616,10 +615,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) return NULL; } -static void svc_rdma_release_rqst(struct svc_rqst *rqstp) -{ -} - /* * When connected, an svc_xprt has at least two references: * diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 35558656fe02fa28f304dd42372d172e0fd3ae71..41f4464ac6cc5b5f04c278574a7bd6c47e4646eb 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -409,10 +409,11 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con) read_lock_bh(&sk->sk_callback_lock); ret = tipc_conn_rcv_sub(srv, con, &s); read_unlock_bh(&sk->sk_callback_lock); + if (!ret) + return 0; } - if (ret < 0) - tipc_conn_close(con); + tipc_conn_close(con); return ret; } diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 382c84d9339d627c2068dd097fff1bb236a6a116..1d623547970617136ed6429c3112610dfc7be3ae 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -189,10 +189,13 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb, .saddr = src->ipv6, .flowi6_proto = IPPROTO_UDP }; - err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst, - &fl6); - if (err) + ndst = ipv6_stub->ipv6_dst_lookup_flow(net, + ub->ubsock->sk, + &fl6, NULL); + if (IS_ERR(ndst)) { + err = PTR_ERR(ndst); goto tx_error; + } ttl = ip6_dst_hoplimit(ndst); err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL, &src->ipv6, &dst->ipv6, 0, ttl, 0, diff --git a/net/wireless/core.h b/net/wireless/core.h index f5d58652108dddc3173b7caa438c59175bc75fba..5dd906bcade5bf7d68f5449c6ad031c61ee561ff 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -3,6 +3,7 @@ * Wireless configuration interface internals. * * Copyright 2006-2010 Johannes Berg + * Copyright (C) 2018-2019 Intel Corporation */ #ifndef __NET_WIRELESS_CORE_H #define __NET_WIRELESS_CORE_H @@ -170,12 +171,23 @@ static inline struct cfg80211_internal_bss *bss_from_pub(struct cfg80211_bss *pu static inline void cfg80211_hold_bss(struct cfg80211_internal_bss *bss) { atomic_inc(&bss->hold); + if (bss->pub.transmitted_bss) { + bss = container_of(bss->pub.transmitted_bss, + struct cfg80211_internal_bss, pub); + atomic_inc(&bss->hold); + } } static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss) { int r = atomic_dec_return(&bss->hold); WARN_ON(r < 0); + if (bss->pub.transmitted_bss) { + bss = container_of(bss->pub.transmitted_bss, + struct cfg80211_internal_bss, pub); + r = atomic_dec_return(&bss->hold); + WARN_ON(r < 0); + } } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 6160a4647e6b2cc0571862397b66a0ca8952abee..1b024e34013d19e95b5e063ec16b3d2913f48995 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -539,7 +539,8 @@ nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = { /* policy for GTK rekey offload attributes */ static const struct nla_policy nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = { - [NL80211_REKEY_DATA_KEK] = { .len = NL80211_KEK_LEN }, + [NL80211_REKEY_DATA_KEK] = { .type = NLA_BINARY, + .len = FILS_MAX_KEK_LEN }, [NL80211_REKEY_DATA_KCK] = { .len = NL80211_KCK_LEN }, [NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN }, }; @@ -860,7 +861,7 @@ struct key_parse { struct key_params p; int idx; int type; - bool def, defmgmt; + bool def, defmgmt, defbeacon; bool def_uni, def_multi; }; @@ -875,12 +876,13 @@ static int nl80211_parse_key_new(struct genl_info *info, struct nlattr *key, k->def = !!tb[NL80211_KEY_DEFAULT]; k->defmgmt 
= !!tb[NL80211_KEY_DEFAULT_MGMT]; + k->defbeacon = !!tb[NL80211_KEY_DEFAULT_BEACON]; if (k->def) { k->def_uni = true; k->def_multi = true; } - if (k->defmgmt) + if (k->defmgmt || k->defbeacon) k->def_multi = true; if (tb[NL80211_KEY_IDX]) @@ -992,14 +994,17 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k) if (err) return err; - if (k->def && k->defmgmt) { - GENL_SET_ERR_MSG(info, "key with def && defmgmt is invalid"); + if ((k->def ? 1 : 0) + (k->defmgmt ? 1 : 0) + + (k->defbeacon ? 1 : 0) > 1) { + GENL_SET_ERR_MSG(info, + "key with multiple default flags is invalid"); return -EINVAL; } - if (k->defmgmt) { + if (k->defmgmt || k->defbeacon) { if (k->def_uni || !k->def_multi) { - GENL_SET_ERR_MSG(info, "defmgmt key must be mcast"); + GENL_SET_ERR_MSG(info, + "defmgmt/defbeacon key must be mcast"); return -EINVAL; } } @@ -1011,14 +1016,20 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k) "defmgmt key idx not 4 or 5"); return -EINVAL; } + } else if (k->defbeacon) { + if (k->idx < 6 || k->idx > 7) { + GENL_SET_ERR_MSG(info, + "defbeacon key idx not 6 or 7"); + return -EINVAL; + } } else if (k->def) { if (k->idx < 0 || k->idx > 3) { GENL_SET_ERR_MSG(info, "def key idx not 0-3"); return -EINVAL; } } else { - if (k->idx < 0 || k->idx > 5) { - GENL_SET_ERR_MSG(info, "key idx not 0-5"); + if (k->idx < 0 || k->idx > 7) { + GENL_SET_ERR_MSG(info, "key idx not 0-7"); return -EINVAL; } } @@ -1635,6 +1646,46 @@ static int nl80211_add_commands_unsplit(struct cfg80211_registered_device *rdev, return -ENOBUFS; } +static int +nl80211_put_iftype_akm_suites(struct cfg80211_registered_device *rdev, + struct sk_buff *msg) +{ + int i; + struct nlattr *nested, *nested_akms; + const struct wiphy_iftype_akm_suites *iftype_akms; + + if (!rdev->wiphy.num_iftype_akm_suites || + !rdev->wiphy.iftype_akm_suites) + return 0; + + nested = nla_nest_start(msg, NL80211_ATTR_IFTYPE_AKM_SUITES); + if (!nested) + return -ENOBUFS; + + for (i = 0; i < rdev->wiphy.num_iftype_akm_suites; i++) { + nested_akms = nla_nest_start(msg, i + 1); + if (!nested_akms) + return -ENOBUFS; + + iftype_akms = &rdev->wiphy.iftype_akm_suites[i]; + + if (nl80211_put_iftypes(msg, NL80211_IFTYPE_AKM_ATTR_IFTYPES, + iftype_akms->iftypes_mask)) + return -ENOBUFS; + + if (nla_put(msg, NL80211_IFTYPE_AKM_ATTR_SUITES, + sizeof(u32) * iftype_akms->n_akm_suites, + iftype_akms->akm_suites)) { + return -ENOBUFS; + } + nla_nest_end(msg, nested_akms); + } + + nla_nest_end(msg, nested); + + return 0; +} + struct nl80211_dump_wiphy_state { s64 filter_wiphy; long start; @@ -2138,6 +2189,9 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, goto nla_put_failure; } + if (nl80211_put_iftype_akm_suites(rdev, msg)) + goto nla_put_failure; + /* done */ state->split_start = 0; break; @@ -3449,8 +3503,14 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) void *hdr; struct sk_buff *msg; - if (info->attrs[NL80211_ATTR_KEY_IDX]) + if (info->attrs[NL80211_ATTR_KEY_IDX]) { key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); + if (key_idx > 5 && + !wiphy_ext_feature_isset( + &rdev->wiphy, + NL80211_EXT_FEATURE_BEACON_PROTECTION)) + return -EINVAL; + } if (key_idx > 5) return -EINVAL; @@ -3529,7 +3589,7 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) return -EINVAL; /* only support setting default key */ - if (!key.def && !key.defmgmt) + if (!key.def && !key.defmgmt && !key.defbeacon) return -EINVAL; wdev_lock(dev->ieee80211_ptr); @@ -3553,7 +3613,7 @@ 
static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) #ifdef CONFIG_CFG80211_WEXT dev->ieee80211_ptr->wext.default_key = key.idx; #endif - } else { + } else if (key.defmgmt) { if (key.def_uni || !key.def_multi) { err = -EINVAL; goto out; @@ -3575,6 +3635,24 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) #ifdef CONFIG_CFG80211_WEXT dev->ieee80211_ptr->wext.default_mgmt_key = key.idx; #endif + } else if (key.defbeacon) { + if (key.def_uni || !key.def_multi) { + err = -EINVAL; + goto out; + } + + if (!rdev->ops->set_default_beacon_key) { + err = -EOPNOTSUPP; + goto out; + } + + err = nl80211_key_allowed(dev->ieee80211_ptr); + if (err) + goto out; + + err = rdev_set_default_beacon_key(rdev, dev, key.idx); + if (err) + goto out; } out: @@ -4803,6 +4881,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, PUT_SINFO_U64(BEACON_RX, rx_beacon); PUT_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8); PUT_SINFO(ACK_SIGNAL, ack_signal, u8); + PUT_SINFO(RX_MPDUS, rx_mpdu_count, u32); + PUT_SINFO(FCS_ERROR_COUNT, fcs_err_count, u32); if (wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT)) PUT_SINFO(DATA_ACK_SIGNAL_AVG, avg_ack_signal, s8); @@ -11546,15 +11626,27 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] || !tb[NL80211_REKEY_DATA_KCK]) return -EINVAL; + if (!tb[NL80211_REKEY_DATA_KEK] || !tb[NL80211_REKEY_DATA_REPLAY_CTR] || + (!wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) && + !wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_FILS_STA) && + !tb[NL80211_REKEY_DATA_KCK])) + return -EINVAL; + if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN) return -ERANGE; - if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN) + if (nla_len(tb[NL80211_REKEY_DATA_KEK]) < NL80211_KEK_LEN) return -ERANGE; - if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN) + if (tb[NL80211_REKEY_DATA_KCK] && + nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN) return -ERANGE; + memset(&rekey_data, 0, sizeof(rekey_data)); rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]); - rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]); + rekey_data.kek_len = nla_len(tb[NL80211_REKEY_DATA_KEK]); + if (tb[NL80211_REKEY_DATA_KCK]) + rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]); rekey_data.replay_ctr = nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]); wdev_lock(wdev); @@ -13101,6 +13193,31 @@ static int nl80211_tx_control_port(struct sk_buff *skb, struct genl_info *info) return err; } +static int nl80211_update_owe_info(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct cfg80211_update_owe_info owe_info; + struct net_device *dev = info->user_ptr[1]; + + if (!rdev->ops->update_owe_info) + return -EOPNOTSUPP; + + if (!info->attrs[NL80211_ATTR_STATUS_CODE] || + !info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + memset(&owe_info, 0, sizeof(owe_info)); + owe_info.status = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]); + nla_memcpy(owe_info.peer, info->attrs[NL80211_ATTR_MAC], ETH_ALEN); + + if (info->attrs[NL80211_ATTR_IE]) { + owe_info.ie = nla_data(info->attrs[NL80211_ATTR_IE]); + owe_info.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); + } + + return rdev_update_owe_info(rdev, dev, &owe_info); +} + #define NL80211_FLAG_NEED_WIPHY 0x01 #define NL80211_FLAG_NEED_NETDEV 0x02 
#define NL80211_FLAG_NEED_RTNL 0x04 @@ -14018,6 +14135,13 @@ static const struct genl_ops nl80211_ops[] = { .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, + { + .cmd = NL80211_CMD_UPDATE_OWE_INFO, + .doit = nl80211_update_owe_info, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | + NL80211_FLAG_NEED_RTNL, + }, }; static struct genl_family nl80211_fam __ro_after_init = { @@ -16197,6 +16321,46 @@ int cfg80211_external_auth_request(struct net_device *dev, } EXPORT_SYMBOL(cfg80211_external_auth_request); +void cfg80211_update_owe_info_event(struct net_device *netdev, + struct cfg80211_update_owe_info *owe_info, + gfp_t gfp) +{ + struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + struct sk_buff *msg; + void *hdr; + + trace_cfg80211_update_owe_info_event(wiphy, netdev, owe_info); + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_UPDATE_OWE_INFO); + if (!hdr) + goto nla_put_failure; + + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || + nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, owe_info->peer)) + goto nla_put_failure; + + if (!owe_info->ie_len || + nla_put(msg, NL80211_ATTR_IE, owe_info->ie_len, owe_info->ie)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, gfp); + return; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} +EXPORT_SYMBOL(cfg80211_update_owe_info_event); + /* initialisation/exit functions */ int __init nl80211_init(void) diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index a8c58aeb9dde9404cdbd86858baa65955879d1e3..7cfaab135647d15e98014b3034b85f30f04b4268 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -135,6 +135,19 @@ rdev_set_default_mgmt_key(struct cfg80211_registered_device *rdev, return ret; } +static inline int +rdev_set_default_beacon_key(struct cfg80211_registered_device *rdev, + struct net_device *netdev, u8 key_index) +{ + int ret; + + trace_rdev_set_default_beacon_key(&rdev->wiphy, netdev, key_index); + ret = rdev->ops->set_default_beacon_key(&rdev->wiphy, netdev, + key_index); + trace_rdev_return_int(&rdev->wiphy, ret); + return ret; +} + static inline int rdev_start_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ap_settings *settings) @@ -1246,4 +1259,17 @@ rdev_external_auth(struct cfg80211_registered_device *rdev, return ret; } +static inline int rdev_update_owe_info(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct cfg80211_update_owe_info *oweinfo) +{ + int ret = -EOPNOTSUPP; + + trace_rdev_update_owe_info(&rdev->wiphy, dev, oweinfo); + if (rdev->ops->update_owe_info) + ret = rdev->ops->update_owe_info(&rdev->wiphy, dev, oweinfo); + trace_rdev_return_int(&rdev->wiphy, ret); + return ret; +} + #endif /* __CFG80211_RDEV_OPS */ diff --git a/net/wireless/scan.c b/net/wireless/scan.c index e5d61ba837addd63934d4b3e99a5f56aac31fd1e..aa60d99242d2807b10d87108796d87f8beda347f 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -5,6 +5,7 @@ * Copyright 2008 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2016 Intel Deutschland GmbH + * Copyright (C) 2018-2019 Intel Corporation */ #include #include @@ -109,6 +110,12 @@ static inline void bss_ref_get(struct 
cfg80211_registered_device *rdev, pub); bss->refcount++; } + if (bss->pub.transmitted_bss) { + bss = container_of(bss->pub.transmitted_bss, + struct cfg80211_internal_bss, + pub); + bss->refcount++; + } } static inline void bss_ref_put(struct cfg80211_registered_device *rdev, @@ -125,6 +132,18 @@ static inline void bss_ref_put(struct cfg80211_registered_device *rdev, if (hbss->refcount == 0) bss_free(hbss); } + + if (bss->pub.transmitted_bss) { + struct cfg80211_internal_bss *tbss; + + tbss = container_of(bss->pub.transmitted_bss, + struct cfg80211_internal_bss, + pub); + tbss->refcount--; + if (tbss->refcount == 0) + bss_free(tbss); + } + bss->refcount--; if (bss->refcount == 0) bss_free(bss); @@ -150,6 +169,7 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev, } list_del_init(&bss->list); + list_del_init(&bss->pub.nontrans_list); rb_erase(&bss->rbn, &rdev->bss_tree); rdev->bss_entries--; WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list), @@ -159,6 +179,156 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev, return true; } +static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen, + const u8 *subelement, size_t subie_len, + u8 *new_ie, gfp_t gfp) +{ + u8 *pos, *tmp; + const u8 *tmp_old, *tmp_new; + u8 *sub_copy; + + /* copy subelement as we need to change its content to + * mark an ie after it is processed. + */ + sub_copy = kmalloc(subie_len, gfp); + if (!sub_copy) + return 0; + memcpy(sub_copy, subelement, subie_len); + + pos = &new_ie[0]; + + /* set new ssid */ + tmp_new = cfg80211_find_ie(WLAN_EID_SSID, sub_copy, subie_len); + if (tmp_new) { + memcpy(pos, tmp_new, tmp_new[1] + 2); + pos += (tmp_new[1] + 2); + } + + /* go through IEs in ie (skip SSID) and subelement, + * merge them into new_ie + */ + tmp_old = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen); + tmp_old = (tmp_old) ? tmp_old + tmp_old[1] + 2 : ie; + + while (tmp_old + tmp_old[1] + 2 - ie <= ielen) { + if (tmp_old[0] == 0) { + tmp_old++; + continue; + } + + tmp = (u8 *)cfg80211_find_ie(tmp_old[0], sub_copy, subie_len); + if (!tmp) { + /* ie in old ie but not in subelement */ + if (tmp_old[0] != WLAN_EID_MULTIPLE_BSSID) { + memcpy(pos, tmp_old, tmp_old[1] + 2); + pos += tmp_old[1] + 2; + } + } else { + /* ie in transmitting ie also in subelement, + * copy from subelement and flag the ie in subelement + * as copied (by setting eid field to 0xff). For + * vendor ie, compare OUI + type + subType to + * determine if they are the same ie. 
+ */ + if (tmp_old[0] == WLAN_EID_VENDOR_SPECIFIC) { + if (!memcmp(tmp_old + 2, tmp + 2, 5)) { + /* same vendor ie, copy from + * subelement + */ + memcpy(pos, tmp, tmp[1] + 2); + pos += tmp[1] + 2; + tmp[0] = 0xff; + } else { + memcpy(pos, tmp_old, tmp_old[1] + 2); + pos += tmp_old[1] + 2; + } + } else { + /* copy ie from subelement into new ie */ + memcpy(pos, tmp, tmp[1] + 2); + pos += tmp[1] + 2; + tmp[0] = 0xff; + } + } + + if (tmp_old + tmp_old[1] + 2 - ie == ielen) + break; + + tmp_old += tmp_old[1] + 2; + } + + /* go through subelement again to check if there is any ie not + * copied to new ie, skip ssid, capability, bssid-index ie + */ + tmp_new = sub_copy; + while (tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) { + if (!(tmp_new[0] == WLAN_EID_NON_TX_BSSID_CAP || + tmp_new[0] == WLAN_EID_SSID || + tmp_new[0] == WLAN_EID_MULTI_BSSID_IDX || + tmp_new[0] == 0xff)) { + memcpy(pos, tmp_new, tmp_new[1] + 2); + pos += tmp_new[1] + 2; + } + if (tmp_new + tmp_new[1] + 2 - sub_copy == subie_len) + break; + tmp_new += tmp_new[1] + 2; + } + + kfree(sub_copy); + return pos - new_ie; +} + +static bool is_bss(struct cfg80211_bss *a, const u8 *bssid, + const u8 *ssid, size_t ssid_len) +{ + const struct cfg80211_bss_ies *ies; + const u8 *ssidie; + + if (bssid && !ether_addr_equal(a->bssid, bssid)) + return false; + + if (!ssid) + return true; + + ies = rcu_access_pointer(a->ies); + if (!ies) + return false; + ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len); + if (!ssidie) + return false; + if (ssidie[1] != ssid_len) + return false; + return memcmp(ssidie + 2, ssid, ssid_len) == 0; +} + +static int +cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss, + struct cfg80211_bss *nontrans_bss) +{ + const u8 *ssid; + size_t ssid_len; + struct cfg80211_bss *bss = NULL; + + rcu_read_lock(); + ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID); + if (!ssid) { + rcu_read_unlock(); + return -EINVAL; + } + ssid_len = ssid[1]; + ssid = ssid + 2; + rcu_read_unlock(); + + /* check if nontrans_bss is in the list */ + list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) { + if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) + return 0; + } + + /* add to the list */ + list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list); + return 0; +} + static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev, unsigned long expire_time) { @@ -523,29 +693,6 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type, } EXPORT_SYMBOL(cfg80211_find_vendor_ie); -static bool is_bss(struct cfg80211_bss *a, const u8 *bssid, - const u8 *ssid, size_t ssid_len) -{ - const struct cfg80211_bss_ies *ies; - const u8 *ssidie; - - if (bssid && !ether_addr_equal(a->bssid, bssid)) - return false; - - if (!ssid) - return true; - - ies = rcu_access_pointer(a->ies); - if (!ies) - return false; - ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len); - if (!ssidie) - return false; - if (ssidie[1] != ssid_len) - return false; - return memcmp(ssidie + 2, ssid, ssid_len) == 0; -} - /** * enum bss_compare_mode - BSS compare mode * @BSS_CMP_REGULAR: regular compare mode (for insertion and normal find) @@ -880,6 +1027,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, return true; } +struct cfg80211_non_tx_bss { + struct cfg80211_bss *tx_bss; + u8 max_bssid_indicator; + u8 bssid_index; +}; + /* Returned bss is reference counted and must be cleaned up appropriately. 
*/ static struct cfg80211_internal_bss * cfg80211_bss_update(struct cfg80211_registered_device *rdev, @@ -983,6 +1136,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, memcpy(found->pub.chain_signal, tmp->pub.chain_signal, IEEE80211_MAX_CHAINS); ether_addr_copy(found->parent_bssid, tmp->parent_bssid); + found->pub.max_bssid_indicator = tmp->pub.max_bssid_indicator; + found->pub.bssid_index = tmp->pub.bssid_index; } else { struct cfg80211_internal_bss *new; struct cfg80211_internal_bss *hidden; @@ -1007,6 +1162,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, memcpy(new, tmp, sizeof(*new)); new->refcount = 1; INIT_LIST_HEAD(&new->hidden_list); + INIT_LIST_HEAD(&new->pub.nontrans_list); if (rcu_access_pointer(tmp->pub.proberesp_ies)) { hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN); @@ -1040,6 +1196,17 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, goto drop; } + /* This must be before the call to bss_ref_get */ + if (tmp->pub.transmitted_bss) { + struct cfg80211_internal_bss *pbss = + container_of(tmp->pub.transmitted_bss, + struct cfg80211_internal_bss, + pub); + + new->pub.transmitted_bss = tmp->pub.transmitted_bss; + bss_ref_get(rdev, pbss); + } + list_add_tail(&new->list, &rdev->bss_list); rdev->bss_entries++; rb_insert_bss(rdev, new); @@ -1128,14 +1295,16 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen, } /* Returned bss is reference counted and must be cleaned up appropriately. */ -struct cfg80211_bss * -cfg80211_inform_bss_data(struct wiphy *wiphy, - struct cfg80211_inform_bss *data, - enum cfg80211_bss_frame_type ftype, - const u8 *bssid, u64 tsf, u16 capability, - u16 beacon_interval, const u8 *ie, size_t ielen, - gfp_t gfp) +static struct cfg80211_bss * +cfg80211_inform_single_bss_data(struct wiphy *wiphy, + struct cfg80211_inform_bss *data, + enum cfg80211_bss_frame_type ftype, + const u8 *bssid, u64 tsf, u16 capability, + u16 beacon_interval, const u8 *ie, size_t ielen, + struct cfg80211_non_tx_bss *non_tx_data, + gfp_t gfp) { + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_bss_ies *ies; struct ieee80211_channel *channel; struct cfg80211_internal_bss tmp = {}, *res; @@ -1161,6 +1330,11 @@ cfg80211_inform_bss_data(struct wiphy *wiphy, tmp.pub.beacon_interval = beacon_interval; tmp.pub.capability = capability; tmp.ts_boottime = data->boottime_ns; + if (non_tx_data) { + tmp.pub.transmitted_bss = non_tx_data->tx_bss; + tmp.pub.bssid_index = non_tx_data->bssid_index; + tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator; + } /* * If we do not know here whether the IEs are from a Beacon or Probe @@ -1207,19 +1381,246 @@ cfg80211_inform_bss_data(struct wiphy *wiphy, regulatory_hint_found_beacon(wiphy, channel, gfp); } + if (non_tx_data && non_tx_data->tx_bss) { + /* this is a nontransmitting bss, we need to add it to + * transmitting bss' list if it is not there + */ + if (cfg80211_add_nontrans_list(non_tx_data->tx_bss, + &res->pub)) { + if (__cfg80211_unlink_bss(rdev, res)) + rdev->bss_generation++; + } + } + trace_cfg80211_return_bss(&res->pub); /* cfg80211_bss_update gives us a referenced result */ return &res->pub; } -EXPORT_SYMBOL(cfg80211_inform_bss_data); -/* cfg80211_inform_bss_width_frame helper */ +static void cfg80211_parse_mbssid_data(struct wiphy *wiphy, + struct cfg80211_inform_bss *data, + enum cfg80211_bss_frame_type ftype, + const u8 *bssid, u64 tsf, + u16 beacon_interval, const u8 *ie, + size_t ielen, + struct cfg80211_non_tx_bss *non_tx_data, 
+ gfp_t gfp) +{ + const u8 *mbssid_index_ie; + const struct element *elem, *sub; + size_t new_ie_len; + u8 new_bssid[ETH_ALEN]; + u8 *new_ie; + u16 capability; + struct cfg80211_bss *bss; + + if (!non_tx_data) + return; + if (!cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) + return; + if (!wiphy->support_mbssid) + return; + if (wiphy->support_only_he_mbssid && + !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen)) + return; + + new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp); + if (!new_ie) + return; + + for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, ie, ielen) { + if (elem->datalen < 4) + continue; + for_each_element(sub, elem->data + 1, elem->datalen - 1) { + if (sub->id != 0 || sub->datalen < 4) { + /* not a valid BSS profile */ + continue; + } + + if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP || + sub->data[1] != 2) { + /* The first element within the Nontransmitted + * BSSID Profile is not the Nontransmitted + * BSSID Capability element. + */ + continue; + } + + /* found a Nontransmitted BSSID Profile */ + mbssid_index_ie = cfg80211_find_ie + (WLAN_EID_MULTI_BSSID_IDX, + sub->data, sub->datalen); + if (!mbssid_index_ie || mbssid_index_ie[1] < 1 || + mbssid_index_ie[2] == 0) { + /* No valid Multiple BSSID-Index element */ + continue; + } + + non_tx_data->bssid_index = mbssid_index_ie[2]; + non_tx_data->max_bssid_indicator = elem->data[0]; + + cfg80211_gen_new_bssid(bssid, + non_tx_data->max_bssid_indicator, + non_tx_data->bssid_index, + new_bssid); + memset(new_ie, 0, IEEE80211_MAX_DATA_LEN); + new_ie_len = cfg80211_gen_new_ie(ie, ielen, sub->data, + sub->datalen, new_ie, + gfp); + if (!new_ie_len) + continue; + + capability = get_unaligned_le16(sub->data + 2); + bss = cfg80211_inform_single_bss_data(wiphy, data, + ftype, + new_bssid, tsf, + capability, + beacon_interval, + new_ie, + new_ie_len, + non_tx_data, + gfp); + if (!bss) + break; + cfg80211_put_bss(wiphy, bss); + } + } + + kfree(new_ie); +} + struct cfg80211_bss * -cfg80211_inform_bss_frame_data(struct wiphy *wiphy, - struct cfg80211_inform_bss *data, - struct ieee80211_mgmt *mgmt, size_t len, - gfp_t gfp) +cfg80211_inform_bss_data(struct wiphy *wiphy, + struct cfg80211_inform_bss *data, + enum cfg80211_bss_frame_type ftype, + const u8 *bssid, u64 tsf, u16 capability, + u16 beacon_interval, const u8 *ie, size_t ielen, + gfp_t gfp) +{ + struct cfg80211_bss *res; + struct cfg80211_non_tx_bss non_tx_data; + + res = cfg80211_inform_single_bss_data(wiphy, data, ftype, bssid, tsf, + capability, beacon_interval, ie, + ielen, NULL, gfp); + non_tx_data.tx_bss = res; + cfg80211_parse_mbssid_data(wiphy, data, ftype, bssid, tsf, + beacon_interval, ie, ielen, &non_tx_data, + gfp); + return res; +} +EXPORT_SYMBOL(cfg80211_inform_bss_data); +static void +cfg80211_parse_mbssid_frame_data(struct wiphy *wiphy, + struct cfg80211_inform_bss *data, + struct ieee80211_mgmt *mgmt, size_t len, + struct cfg80211_non_tx_bss *non_tx_data, + gfp_t gfp) +{ + enum cfg80211_bss_frame_type ftype; + const u8 *ie = mgmt->u.probe_resp.variable; + size_t ielen = len - offsetof(struct ieee80211_mgmt, + u.probe_resp.variable); + + ftype = ieee80211_is_beacon(mgmt->frame_control) ? 
+ CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP; + + cfg80211_parse_mbssid_data(wiphy, data, ftype, mgmt->bssid, + le64_to_cpu(mgmt->u.probe_resp.timestamp), + le16_to_cpu(mgmt->u.probe_resp.beacon_int), + ie, ielen, non_tx_data, gfp); +} + +static void +cfg80211_update_notlisted_nontrans(struct wiphy *wiphy, + struct cfg80211_bss *nontrans_bss, + struct ieee80211_mgmt *mgmt, size_t len, + gfp_t gfp) +{ + u8 *ie, *new_ie, *pos; + const u8 *nontrans_ssid, *trans_ssid, *mbssid; + size_t ielen = len - offsetof(struct ieee80211_mgmt, + u.probe_resp.variable); + size_t new_ie_len; + struct cfg80211_bss_ies *new_ies; + const struct cfg80211_bss_ies *old; + u8 cpy_len; + + ie = mgmt->u.probe_resp.variable; + + new_ie_len = ielen; + trans_ssid = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen); + if (!trans_ssid) + return; + new_ie_len -= trans_ssid[1]; + mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen); + if (!mbssid) + return; + new_ie_len -= mbssid[1]; + rcu_read_lock(); + nontrans_ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID); + if (!nontrans_ssid) { + rcu_read_unlock(); + return; + } + new_ie_len += nontrans_ssid[1]; + rcu_read_unlock(); + + /* generate new ie for nontrans BSS + * 1. replace SSID with nontrans BSS' SSID + * 2. skip MBSSID IE + */ + new_ie = kzalloc(new_ie_len, gfp); + if (!new_ie) + return; + new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, gfp); + if (!new_ies) { + kfree(new_ie); + return; + } + + pos = new_ie; + + /* copy the nontransmitted SSID */ + cpy_len = nontrans_ssid[1] + 2; + memcpy(pos, nontrans_ssid, cpy_len); + pos += cpy_len; + /* copy the IEs between SSID and MBSSID */ + cpy_len = trans_ssid[1] + 2; + memcpy(pos, (trans_ssid + cpy_len), (mbssid - (trans_ssid + cpy_len))); + pos += (mbssid - (trans_ssid + cpy_len)); + /* copy the IEs after MBSSID */ + cpy_len = mbssid[1] + 2; + memcpy(pos, mbssid + cpy_len, ((ie + ielen) - (mbssid + cpy_len))); + + /* update ie */ + new_ies->len = new_ie_len; + new_ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp); + new_ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control); + memcpy(new_ies->data, new_ie, new_ie_len); + if (ieee80211_is_probe_resp(mgmt->frame_control)) { + old = rcu_access_pointer(nontrans_bss->proberesp_ies); + rcu_assign_pointer(nontrans_bss->proberesp_ies, new_ies); + rcu_assign_pointer(nontrans_bss->ies, new_ies); + if (old) + kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); + } else { + old = rcu_access_pointer(nontrans_bss->beacon_ies); + rcu_assign_pointer(nontrans_bss->beacon_ies, new_ies); + rcu_assign_pointer(nontrans_bss->ies, new_ies); + if (old) + kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); + } +} + +/* cfg80211_inform_bss_width_frame helper */ +static struct cfg80211_bss * +cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy, + struct cfg80211_inform_bss *data, + struct ieee80211_mgmt *mgmt, size_t len, + struct cfg80211_non_tx_bss *non_tx_data, + gfp_t gfp) { struct cfg80211_internal_bss tmp = {}, *res; struct cfg80211_bss_ies *ies; @@ -1277,6 +1678,11 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, tmp.pub.chains = data->chains; memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS); ether_addr_copy(tmp.parent_bssid, data->parent_bssid); + if (non_tx_data) { + tmp.pub.transmitted_bss = non_tx_data->tx_bss; + tmp.pub.bssid_index = non_tx_data->bssid_index; + tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator; + } signal_valid = abs(data->chan->center_freq - channel->center_freq) <= 
wiphy->max_adj_channel_rssi_comp; @@ -1298,6 +1704,53 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, /* cfg80211_bss_update gives us a referenced result */ return &res->pub; } + +struct cfg80211_bss * +cfg80211_inform_bss_frame_data(struct wiphy *wiphy, + struct cfg80211_inform_bss *data, + struct ieee80211_mgmt *mgmt, size_t len, + gfp_t gfp) +{ + struct cfg80211_bss *res, *tmp_bss; + const u8 *ie = mgmt->u.probe_resp.variable; + const struct cfg80211_bss_ies *ies1, *ies2; + size_t ielen = len - offsetof(struct ieee80211_mgmt, + u.probe_resp.variable); + struct cfg80211_non_tx_bss non_tx_data; + + res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt, + len, NULL, gfp); + if (!res || !wiphy->support_mbssid || + !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) + return res; + if (wiphy->support_only_he_mbssid && + !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen)) + return res; + + non_tx_data.tx_bss = res; + /* process each non-transmitting bss */ + cfg80211_parse_mbssid_frame_data(wiphy, data, mgmt, len, + &non_tx_data, gfp); + + /* check if the res has other nontransmitting bss which is not + * in MBSSID IE + */ + ies1 = rcu_access_pointer(res->ies); + + /* go through nontrans_list, if the timestamp of the BSS is + * earlier than the timestamp of the transmitting BSS then + * update it + */ + list_for_each_entry(tmp_bss, &res->nontrans_list, + nontrans_list) { + ies2 = rcu_access_pointer(tmp_bss->ies); + if (ies2->tsf < ies1->tsf) + cfg80211_update_notlisted_nontrans(wiphy, tmp_bss, + mgmt, len, gfp); + } + + return res; +} EXPORT_SYMBOL(cfg80211_inform_bss_frame_data); void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) @@ -1335,7 +1788,8 @@ EXPORT_SYMBOL(cfg80211_put_bss); void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); - struct cfg80211_internal_bss *bss; + struct cfg80211_internal_bss *bss, *tmp1; + struct cfg80211_bss *nontrans_bss, *tmp; if (WARN_ON(!pub)) return; @@ -1343,10 +1797,21 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) bss = container_of(pub, struct cfg80211_internal_bss, pub); spin_lock_bh(&rdev->bss_lock); - if (!list_empty(&bss->list)) { - if (__cfg80211_unlink_bss(rdev, bss)) + if (list_empty(&bss->list)) + goto out; + + list_for_each_entry_safe(nontrans_bss, tmp, + &pub->nontrans_list, + nontrans_list) { + tmp1 = container_of(nontrans_bss, + struct cfg80211_internal_bss, pub); + if (__cfg80211_unlink_bss(rdev, tmp1)) rdev->bss_generation++; } + + if (__cfg80211_unlink_bss(rdev, bss)) + rdev->bss_generation++; +out: spin_unlock_bh(&rdev->bss_lock); } EXPORT_SYMBOL(cfg80211_unlink_bss); diff --git a/net/wireless/sme.c b/net/wireless/sme.c index f455b9af6815cd799d928a1753ad59293cda0d54..ae52143d6dca4270888f473bf74ab8116f4009a5 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -1087,9 +1087,16 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, * Delete all the keys ... pairwise keys can't really * exist any more anyway, but default keys might. 
*/ - if (rdev->ops->del_key) - for (i = 0; i < 6; i++) + if (rdev->ops->del_key) { + int max_key_idx = 5; + + if (wiphy_ext_feature_isset( + wdev->wiphy, + NL80211_EXT_FEATURE_BEACON_PROTECTION)) + max_key_idx = 7; + for (i = 0; i <= max_key_idx; i++) rdev_del_key(rdev, dev, i, false, NULL); + } rdev_set_qos_map(rdev, dev, NULL); diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 54b0bb344cf93811c96b36ddd21709b889741123..713c15d6c5e614234c838f82a0c5ea115b3b0c01 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -471,6 +471,23 @@ TRACE_EVENT(rdev_set_default_mgmt_key, WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index) ); +TRACE_EVENT(rdev_set_default_beacon_key, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index), + TP_ARGS(wiphy, netdev, key_index), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + __field(u8, key_index) + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + __entry->key_index = key_index; + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key index: %u", + WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index) +); + TRACE_EVENT(rdev_start_ap, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_ap_settings *settings), @@ -3264,6 +3281,44 @@ TRACE_EVENT(rdev_get_txq_stats, ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) ); + +TRACE_EVENT(rdev_update_owe_info, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_update_owe_info *owe_info), + TP_ARGS(wiphy, netdev, owe_info), + TP_STRUCT__entry(WIPHY_ENTRY + NETDEV_ENTRY + MAC_ENTRY(peer) + __field(u16, status) + __dynamic_array(u8, ie, owe_info->ie_len)), + TP_fast_assign(WIPHY_ASSIGN; + NETDEV_ASSIGN; + MAC_ASSIGN(peer, owe_info->peer); + __entry->status = owe_info->status; + memcpy(__get_dynamic_array(ie), + owe_info->ie, owe_info->ie_len);), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT + " status %d", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), + __entry->status) +); + +TRACE_EVENT(cfg80211_update_owe_info_event, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_update_owe_info *owe_info), + TP_ARGS(wiphy, netdev, owe_info), + TP_STRUCT__entry(WIPHY_ENTRY + NETDEV_ENTRY + MAC_ENTRY(peer) + __dynamic_array(u8, ie, owe_info->ie_len)), + TP_fast_assign(WIPHY_ASSIGN; + NETDEV_ASSIGN; + MAC_ASSIGN(peer, owe_info->peer); + memcpy(__get_dynamic_array(ie), owe_info->ie, + owe_info->ie_len);), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT, + WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer)) +); + #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH diff --git a/net/wireless/util.c b/net/wireless/util.c index 637be3f87c28c1a7221a5e6b1d9d4585611a08ec..7217c3ddc44f20b698530b329446143eef9c026b 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -218,7 +218,12 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, struct key_params *params, int key_idx, bool pairwise, const u8 *mac_addr) { - if (key_idx < 0 || key_idx > 5) + int max_key_idx = 5; + + if (wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_BEACON_PROTECTION)) + max_key_idx = 7; + if (key_idx < 0 || key_idx > max_key_idx) return -EINVAL; if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index 39231237e1c3b3806dc956429195a59edf46bfd6..30f71620d4e3f3273fc5e682eee5fd3112acab55 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -120,8 +120,10 @@ int 
x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, goto drop; } - if (!pskb_may_pull(skb, 1)) + if (!pskb_may_pull(skb, 1)) { + x25_neigh_put(nb); return 0; + } switch (skb->data[0]) { diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 556a649512b603e3222e2515c97739f6c8e2268a..706fad12f22cff39b6d898b821340ef9ae185cfb 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -260,7 +260,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) u32 chunk_size = mr->chunk_size, headroom = mr->headroom; unsigned int chunks, chunks_per_page; u64 addr = mr->addr, size = mr->len; - int size_chk, err, i; + int err, i; if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) { /* Strictly speaking we could support this, if: @@ -295,8 +295,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) headroom = ALIGN(headroom, 64); - size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM; - if (size_chk < 0) + if (headroom >= chunk_size - XDP_PACKET_HEADROOM) return -EINVAL; umem->address = (unsigned long)addr; diff --git a/samples/vfio-mdev/mdpy.c b/samples/vfio-mdev/mdpy.c index 96e7969c473aa47bc6ff597771b3934d20244f9d..d774717cd9066048722a8492f41b5892e0f64315 100644 --- a/samples/vfio-mdev/mdpy.c +++ b/samples/vfio-mdev/mdpy.c @@ -418,7 +418,7 @@ static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) return -EINVAL; return remap_vmalloc_range_partial(vma, vma->vm_start, - mdev_state->memblk, + mdev_state->memblk, 0, vma->vm_end - vma->vm_start); } diff --git a/scripts/config b/scripts/config index e0e39826dae90ab735d1510e882bd32a9536e005..eee5b7f3a092ae4ac46bcdf92ffe8bb3c643aafd 100755 --- a/scripts/config +++ b/scripts/config @@ -7,6 +7,9 @@ myname=${0##*/} # If no prefix forced, use the default CONFIG_ CONFIG_="${CONFIG_-CONFIG_}" +# We use an uncommon delimiter for sed substitutions +SED_DELIM=$(echo -en "\001") + usage() { cat >&2 <"$tmpfile" + sed -e "s$SED_DELIM$before$SED_DELIM$after$SED_DELIM" "$infile" >"$tmpfile" # replace original file with the edited one mv "$tmpfile" "$infile" } diff --git a/scripts/decodecode b/scripts/decodecode index 9cef558528aac890dfc2d2545aa9eb2fd8be3934..eeaa435d1bd23253b66176bb616707af7ad842c3 100755 --- a/scripts/decodecode +++ b/scripts/decodecode @@ -119,7 +119,7 @@ faultlinenum=$(( $(wc -l $T.oo | cut -d" " -f1) - \ faultline=`cat $T.dis | head -1 | cut -d":" -f2-` faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'` -cat $T.oo | sed -e "${faultlinenum}s/^\(.*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/" +cat $T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/" echo cat $T.aa cleanup diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc index ef4310f2558b1be23c3e8705e8a9a11e97ec03ff..8f004db6f6034d21efff9ef99396ae0a6e8a4a73 100644 --- a/scripts/kconfig/qconf.cc +++ b/scripts/kconfig/qconf.cc @@ -627,7 +627,7 @@ void ConfigList::updateMenuList(ConfigItem *parent, struct menu* menu) last = item; continue; } - hide: +hide: if (item && item->menu == child) { last = parent->firstChild(); if (last == item) @@ -692,7 +692,7 @@ void ConfigList::updateMenuList(ConfigList *parent, struct menu* menu) last = item; continue; } - hide: +hide: if (item && item->menu == child) { last = (ConfigItem*)parent->topLevelItem(0); if (last == item) @@ -1225,10 +1225,11 @@ QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos) { QMenu* popup = Parent::createStandardContextMenu(pos); QAction* action = new 
QAction("Show Debug Info", popup); - action->setCheckable(true); - connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool))); - connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool))); - action->setChecked(showDebug()); + + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool))); + connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool))); + action->setChecked(showDebug()); popup->addSeparator(); popup->addAction(action); return popup; diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index f9ab34811dbf616a610a98e5e0c49601b5b39c17..cc1b7ece676e0fcefce51e155811f6b3c65f4220 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -2164,8 +2164,12 @@ static int check_modname_len(struct module *mod) **/ static void add_header(struct buffer *b, struct module *mod) { - buf_printf(b, "#include \n"); buf_printf(b, "#include \n"); + /* + * Include build-salt.h after module.h in order to + * inherit the definitions. + */ + buf_printf(b, "#include \n"); buf_printf(b, "#include \n"); buf_printf(b, "#include \n"); buf_printf(b, "\n"); diff --git a/security/keys/big_key.c b/security/keys/big_key.c index 2806e70d7f8fcddfd1b39a1fed8114975ff1c82c..630594a5b46e6e9624330e1278f7a79ef0980eb6 100644 --- a/security/keys/big_key.c +++ b/security/keys/big_key.c @@ -356,7 +356,7 @@ void big_key_describe(const struct key *key, struct seq_file *m) * read the key data * - the key's semaphore is read-locked */ -long big_key_read(const struct key *key, char __user *buffer, size_t buflen) +long big_key_read(const struct key *key, char *buffer, size_t buflen) { size_t datalen = (size_t)key->payload.data[big_key_len]; long ret; @@ -395,9 +395,8 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen) ret = datalen; - /* copy decrypted data to user */ - if (copy_to_user(buffer, buf->virt, datalen) != 0) - ret = -EFAULT; + /* copy out decrypted data */ + memcpy(buffer, buf->virt, datalen); err_fput: fput(file); @@ -405,9 +404,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen) big_key_free_buffer(buf); } else { ret = datalen; - if (copy_to_user(buffer, key->payload.data[big_key_data], - datalen) != 0) - ret = -EFAULT; + memcpy(buffer, key->payload.data[big_key_data], datalen); } return ret; diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index d92cbf9687c33f090865f6d0caa99d9936f49e3a..571f6d486838ec2af8af5fecc6928ac7b5fcd785 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c @@ -895,14 +895,14 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep) } /* - * encrypted_read - format and copy the encrypted data to userspace + * encrypted_read - format and copy out the encrypted data * * The resulting datablob format is: * * * On success, return to userspace the encrypted key datablob size. 
*/ -static long encrypted_read(const struct key *key, char __user *buffer, +static long encrypted_read(const struct key *key, char *buffer, size_t buflen) { struct encrypted_key_payload *epayload; @@ -950,8 +950,7 @@ static long encrypted_read(const struct key *key, char __user *buffer, key_put(mkey); memzero_explicit(derived_key, sizeof(derived_key)); - if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0) - ret = -EFAULT; + memcpy(buffer, ascii_buf, asciiblob_len); kzfree(ascii_buf); return asciiblob_len; diff --git a/security/keys/internal.h b/security/keys/internal.h index d91833e7af9c2f0d6f215792a99871a2ad8670f1..92d74b9e0149f7dbac4d65951c13dc04a41150ef 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -20,6 +20,8 @@ #include #include #include +#include +#include struct iovec; @@ -303,4 +305,14 @@ static inline void key_check(const struct key *key) #endif +/* + * Helper function to clear and free a kvmalloc'ed memory object. + */ +static inline void __kvzfree(const void *addr, size_t len) +{ + if (addr) { + memset((void *)addr, 0, len); + kvfree(addr); + } +} #endif /* _INTERNAL_H */ diff --git a/security/keys/key.c b/security/keys/key.c index 749a5cf27a192efca427cfba758d49d89b199b0b..d5fa8c4fc5544ac597991d8b5496b603d38528e5 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -383,7 +383,7 @@ int key_payload_reserve(struct key *key, size_t datalen) spin_lock(&key->user->lock); if (delta > 0 && - (key->user->qnbytes + delta >= maxbytes || + (key->user->qnbytes + delta > maxbytes || key->user->qnbytes + delta < key->user->qnbytes)) { ret = -EDQUOT; } diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index ca31af186abd8a71aa2721d06ede9e7f30970974..c07c2e2b2478399328d3d8b445adc2d837079257 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -330,7 +330,7 @@ long keyctl_update_key(key_serial_t id, payload = NULL; if (plen) { ret = -ENOMEM; - payload = kmalloc(plen, GFP_KERNEL); + payload = kvmalloc(plen, GFP_KERNEL); if (!payload) goto error; @@ -351,7 +351,7 @@ long keyctl_update_key(key_serial_t id, key_ref_put(key_ref); error2: - kzfree(payload); + __kvzfree(payload, plen); error: return ret; } @@ -742,6 +742,21 @@ long keyctl_keyring_search(key_serial_t ringid, return ret; } +/* + * Call the read method + */ +static long __keyctl_read_key(struct key *key, char *buffer, size_t buflen) +{ + long ret; + + down_read(&key->sem); + ret = key_validate(key); + if (ret == 0) + ret = key->type->read(key, buffer, buflen); + up_read(&key->sem); + return ret; +} + /* * Read a key's payload. 
* @@ -757,26 +772,28 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) struct key *key; key_ref_t key_ref; long ret; + char *key_data = NULL; + size_t key_data_len; /* find the key first */ key_ref = lookup_user_key(keyid, 0, 0); if (IS_ERR(key_ref)) { ret = -ENOKEY; - goto error; + goto out; } key = key_ref_to_ptr(key_ref); ret = key_read_state(key); if (ret < 0) - goto error2; /* Negatively instantiated */ + goto key_put_out; /* Negatively instantiated */ /* see if we can read it directly */ ret = key_permission(key_ref, KEY_NEED_READ); if (ret == 0) goto can_read_key; if (ret != -EACCES) - goto error2; + goto key_put_out; /* we can't; see if it's searchable from this process's keyrings * - we automatically take account of the fact that it may be @@ -784,26 +801,78 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) */ if (!is_key_possessed(key_ref)) { ret = -EACCES; - goto error2; + goto key_put_out; } /* the key is probably readable - now try to read it */ can_read_key: - ret = -EOPNOTSUPP; - if (key->type->read) { - /* Read the data with the semaphore held (since we might sleep) - * to protect against the key being updated or revoked. + if (!key->type->read) { + ret = -EOPNOTSUPP; + goto key_put_out; + } + + if (!buffer || !buflen) { + /* Get the key length from the read method */ + ret = __keyctl_read_key(key, NULL, 0); + goto key_put_out; + } + + /* + * Read the data with the semaphore held (since we might sleep) + * to protect against the key being updated or revoked. + * + * Allocating a temporary buffer to hold the keys before + * transferring them to user buffer to avoid potential + * deadlock involving page fault and mmap_sem. + * + * key_data_len = (buflen <= PAGE_SIZE) + * ? buflen : actual length of key data + * + * This prevents allocating arbitrary large buffer which can + * be much larger than the actual key length. In the latter case, + * at least 2 passes of this loop is required. + */ + key_data_len = (buflen <= PAGE_SIZE) ? buflen : 0; + for (;;) { + if (key_data_len) { + key_data = kvmalloc(key_data_len, GFP_KERNEL); + if (!key_data) { + ret = -ENOMEM; + goto key_put_out; + } + } + + ret = __keyctl_read_key(key, key_data, key_data_len); + + /* + * Read methods will just return the required length without + * any copying if the provided length isn't large enough. + */ + if (ret <= 0 || ret > buflen) + break; + + /* + * The key may change (unlikely) in between 2 consecutive + * __keyctl_read_key() calls. In this case, we reallocate + * a larger buffer and redo the key read when + * key_data_len < ret <= buflen. 
*/ - down_read(&key->sem); - ret = key_validate(key); - if (ret == 0) - ret = key->type->read(key, buffer, buflen); - up_read(&key->sem); + if (ret > key_data_len) { + if (unlikely(key_data)) + __kvzfree(key_data, key_data_len); + key_data_len = ret; + continue; /* Allocate buffer */ + } + + if (copy_to_user(buffer, key_data, ret)) + ret = -EFAULT; + break; } + __kvzfree(key_data, key_data_len); -error2: +key_put_out: key_put(key); -error: +out: return ret; } @@ -882,8 +951,8 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&newowner->lock); - if (newowner->qnkeys + 1 >= maxkeys || - newowner->qnbytes + key->quotalen >= maxbytes || + if (newowner->qnkeys + 1 > maxkeys || + newowner->qnbytes + key->quotalen > maxbytes || newowner->qnbytes + key->quotalen < newowner->qnbytes) goto quota_overrun; diff --git a/security/keys/keyring.c b/security/keys/keyring.c index 99a55145ddcd22424bc510e14fecbe8c4785344b..e8f2366021ea3efcead0ef7b61ea4fee65deed4b 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -432,7 +432,6 @@ static int keyring_read_iterator(const void *object, void *data) { struct keyring_read_iterator_context *ctx = data; const struct key *key = keyring_ptr_to_key(object); - int ret; kenter("{%s,%d},,{%zu/%zu}", key->type->name, key->serial, ctx->count, ctx->buflen); @@ -440,10 +439,7 @@ static int keyring_read_iterator(const void *object, void *data) if (ctx->count >= ctx->buflen) return 1; - ret = put_user(key->serial, ctx->buffer); - if (ret < 0) - return ret; - ctx->buffer++; + *ctx->buffer++ = key->serial; ctx->count += sizeof(key->serial); return 0; } diff --git a/security/keys/proc.c b/security/keys/proc.c index d38be9db2cc07669d1bc68863161f3a2b10cb1fe..7ec2779cb089b5d064fa35b9ccf94b3c8c5c8147 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c @@ -144,6 +144,8 @@ static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos) n = key_serial_next(p, v); if (n) *_pos = key_node_serial(n); + else + (*_pos)++; return n; } diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index 1d34b2a5f485e2380e8b092cfb62955434c3f908..13ac3b1e57da61ea6bb8af8f46c875fe61e38a7d 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -27,7 +27,7 @@ static int request_key_auth_instantiate(struct key *, static void request_key_auth_describe(const struct key *, struct seq_file *); static void request_key_auth_revoke(struct key *); static void request_key_auth_destroy(struct key *); -static long request_key_auth_read(const struct key *, char __user *, size_t); +static long request_key_auth_read(const struct key *, char *, size_t); /* * The request-key authorisation key type definition. 
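[Editor's sketch, not part of the patch] The key-read rework above moves the userspace copy out of the key type ->read() methods: they now fill a plain kernel buffer, and keyctl_read_key() sizes a temporary kvmalloc'ed buffer (querying the length first with a NULL buffer), reads under key->sem via __keyctl_read_key(), and does the single copy_to_user() itself. A minimal sketch of that calling pattern follows; read_key_to_user_sketch() is a hypothetical helper, and the reallocation retry of the real loop (for a key that grows between the two calls) is omitted.

/*
 * Illustrative sketch only, assuming the helpers introduced above
 * (__keyctl_read_key(), __kvzfree()) and the usual kernel headers
 * (<linux/slab.h>, <linux/uaccess.h>, <linux/key.h>).
 */
static long read_key_to_user_sketch(struct key *key,
				    char __user *ubuf, size_t ubuflen)
{
	size_t klen;
	char *kbuf;
	long ret;

	/* First pass: ask the ->read() method for the payload length. */
	ret = __keyctl_read_key(key, NULL, 0);
	if (ret <= 0)
		return ret;

	/* Second pass: read into a kernel buffer of that length. */
	klen = ret;
	kbuf = kvmalloc(klen, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = __keyctl_read_key(key, kbuf, klen);

	/* Single userspace copy, done outside the ->read() method. */
	if (ret > 0 && ret <= ubuflen && copy_to_user(ubuf, kbuf, ret))
		ret = -EFAULT;

	__kvzfree(kbuf, klen);
	return ret;
}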
@@ -85,7 +85,7 @@ static void request_key_auth_describe(const struct key *key, * - the key's semaphore is read-locked */ static long request_key_auth_read(const struct key *key, - char __user *buffer, size_t buflen) + char *buffer, size_t buflen) { struct request_key_auth *rka = get_request_key_auth(key); size_t datalen; @@ -102,8 +102,7 @@ static long request_key_auth_read(const struct key *key, if (buflen > datalen) buflen = datalen; - if (copy_to_user(buffer, rka->callout_info, buflen) != 0) - ret = -EFAULT; + memcpy(buffer, rka->callout_info, buflen); } return ret; diff --git a/security/keys/trusted.c b/security/keys/trusted.c index b69d3b1777c25d1d3f9cc5af3514352ed0220fcc..09545c42977e809da40c342c64319af45d1bd3e8 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -1135,11 +1135,10 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) * trusted_read - copy the sealed blob data to userspace in hex. * On success, return to userspace the trusted key datablob size. */ -static long trusted_read(const struct key *key, char __user *buffer, +static long trusted_read(const struct key *key, char *buffer, size_t buflen) { const struct trusted_key_payload *p; - char *ascii_buf; char *bufp; int i; @@ -1148,18 +1147,9 @@ static long trusted_read(const struct key *key, char __user *buffer, return -EINVAL; if (buffer && buflen >= 2 * p->blob_len) { - ascii_buf = kmalloc_array(2, p->blob_len, GFP_KERNEL); - if (!ascii_buf) - return -ENOMEM; - - bufp = ascii_buf; + bufp = buffer; for (i = 0; i < p->blob_len; i++) bufp = hex_byte_pack(bufp, p->blob[i]); - if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) { - kzfree(ascii_buf); - return -EFAULT; - } - kzfree(ascii_buf); } return 2 * p->blob_len; } diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index 9f558bedba23a338da5980ab11dd1b716c7b0b67..0e723b676aef1c7e5bfcb4abf6d75e531513b2da 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c @@ -172,7 +172,7 @@ EXPORT_SYMBOL_GPL(user_describe); * read the key data * - the key's semaphore is read-locked */ -long user_read(const struct key *key, char __user *buffer, size_t buflen) +long user_read(const struct key *key, char *buffer, size_t buflen) { const struct user_key_payload *upayload; long ret; @@ -185,8 +185,7 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen) if (buflen > upayload->datalen) buflen = upayload->datalen; - if (copy_to_user(buffer, upayload->data, buflen) != 0) - ret = -EFAULT; + memcpy(buffer, upayload->data, buflen); } return ret; diff --git a/security/min_addr.c b/security/min_addr.c index 94d2b0cf0e7b9cc493e41c3fea9c42376e784268..37eb525ac172fc1a1e639f50a95ce3303a252f10 100644 --- a/security/min_addr.c +++ b/security/min_addr.c @@ -6,6 +6,8 @@ /* amount of vm to protect from userspace access by both DAC and the LSM*/ unsigned long mmap_min_addr; +EXPORT_SYMBOL_GPL(mmap_min_addr); + /* amount of vm to protect from userspace using CAP_SYS_RAWIO (DAC) */ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; /* amount of vm to protect from userspace using the LSM = CONFIG_LSM_MMAP_MIN_ADDR */ diff --git a/security/security.c b/security/security.c index 4a32448e7d267e04b5ad6d641dab285124a20352..a25f7f46f896c02e37b3adef36b0c5342f75f053 100644 --- a/security/security.c +++ b/security/security.c @@ -939,6 +939,7 @@ int security_mmap_addr(unsigned long addr) { return call_int_hook(mmap_addr, 0, addr); } +EXPORT_SYMBOL_GPL(security_mmap_addr); int 
security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 2efc0f342ce50e2ba4ff3185d811423128c316af..ceb4621b3a99676ee9b09580ec09ae5f71c9daf7 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -5596,40 +5596,60 @@ static int selinux_tun_dev_open(void *security) static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb) { - int err = 0; - u32 perm; + int rc = 0; + unsigned int msg_len; + unsigned int data_len = skb->len; + unsigned char *data = skb->data; struct nlmsghdr *nlh; struct sk_security_struct *sksec = sk->sk_security; + u16 sclass = sksec->sclass; + u32 perm; - if (skb->len < NLMSG_HDRLEN) { - err = -EINVAL; - goto out; - } - nlh = nlmsg_hdr(skb); + while (data_len >= nlmsg_total_size(0)) { + nlh = (struct nlmsghdr *)data; + + /* NOTE: the nlmsg_len field isn't reliably set by some netlink + * users which means we can't reject skb's with bogus + * length fields; our solution is to follow what + * netlink_rcv_skb() does and simply skip processing at + * messages with length fields that are clearly junk + */ + if (nlh->nlmsg_len < NLMSG_HDRLEN || nlh->nlmsg_len > data_len) + return 0; - err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm); - if (err) { - if (err == -EINVAL) { + rc = selinux_nlmsg_lookup(sclass, nlh->nlmsg_type, &perm); + if (rc == 0) { + rc = sock_has_perm(sk, perm); + if (rc) + return rc; + } else if (rc == -EINVAL) { + /* -EINVAL is a missing msg/perm mapping */ pr_warn_ratelimited("SELinux: unrecognized netlink" - " message: protocol=%hu nlmsg_type=%hu sclass=%s" - " pig=%d comm=%s\n", - sk->sk_protocol, nlh->nlmsg_type, - secclass_map[sksec->sclass - 1].name, - task_pid_nr(current), current->comm); - if (!enforcing_enabled(&selinux_state) || - security_get_allow_unknown(&selinux_state)) - err = 0; + " message: protocol=%hu nlmsg_type=%hu sclass=%s" + " pid=%d comm=%s\n", + sk->sk_protocol, nlh->nlmsg_type, + secclass_map[sclass - 1].name, + task_pid_nr(current), current->comm); + if (enforcing_enabled(&selinux_state) && + !security_get_allow_unknown(&selinux_state)) + return rc; + rc = 0; + } else if (rc == -ENOENT) { + /* -ENOENT is a missing socket/class mapping, ignore */ + rc = 0; + } else { + return rc; } - /* Ignore */ - if (err == -ENOENT) - err = 0; - goto out; + /* move to the next message after applying netlink padding */ + msg_len = NLMSG_ALIGN(nlh->nlmsg_len); + if (msg_len >= data_len) + return 0; + data_len -= msg_len; + data += msg_len; } - err = sock_has_perm(sk, perm); -out: - return err; + return rc; } #ifdef CONFIG_NETFILTER diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index 9121e9656454f85c5dbf1e41e3b44c62e519fe4a..1bf0397175c33a1d8ae2fda1c3f481fa9154b91f 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c @@ -891,71 +891,124 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream) return snd_compress_wait_for_drain(stream); } -static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg) +static int snd_compr_set_next_track_param(struct snd_compr_stream *stream, + unsigned long arg) { - struct snd_compr_file *data = f->private_data; - struct snd_compr_stream *stream; - int retval = -ENOTTY; + union snd_codec_options codec_options; + int retval; - if (snd_BUG_ON(!data)) + /* set next track params when stream is running or has been setup */ + if (stream->runtime->state != SNDRV_PCM_STATE_SETUP && + 
stream->runtime->state != SNDRV_PCM_STATE_RUNNING) + return -EPERM; + + if (copy_from_user(&codec_options, (void __user *)arg, + sizeof(codec_options))) return -EFAULT; - stream = &data->stream; + retval = stream->ops->set_next_track_param(stream, &codec_options); + return retval; +} + +static int snd_compress_simple_ioctls(struct file *file, + struct snd_compr_stream *stream, + unsigned int cmd, unsigned long arg) +{ + int retval = -ENOTTY; - mutex_lock(&stream->device->lock); switch (_IOC_NR(cmd)) { case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION): retval = put_user(SNDRV_COMPRESS_VERSION, (int __user *)arg) ? -EFAULT : 0; break; + case _IOC_NR(SNDRV_COMPRESS_GET_CAPS): retval = snd_compr_get_caps(stream, arg); break; + #ifndef COMPR_CODEC_CAPS_OVERFLOW case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS): retval = snd_compr_get_codec_caps(stream, arg); break; #endif + + case _IOC_NR(SNDRV_COMPRESS_TSTAMP): + retval = snd_compr_tstamp(stream, arg); + break; + + case _IOC_NR(SNDRV_COMPRESS_AVAIL): + retval = snd_compr_ioctl_avail(stream, arg); + break; + + case _IOC_NR(SNDRV_COMPRESS_DRAIN): + retval = snd_compr_drain(stream); + break; + + case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN): + retval = snd_compr_partial_drain(stream); + break; + } + + return retval; +} + +static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg) +{ + struct snd_compr_file *data = f->private_data; + struct snd_compr_stream *stream; + int retval = -ENOTTY; + + if (snd_BUG_ON(!data)) + return -EFAULT; + + stream = &data->stream; + + mutex_lock(&stream->device->lock); + switch (_IOC_NR(cmd)) { case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS): retval = snd_compr_set_params(stream, arg); break; + case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS): retval = snd_compr_get_params(stream, arg); break; + case _IOC_NR(SNDRV_COMPRESS_SET_METADATA): retval = snd_compr_set_metadata(stream, arg); break; + case _IOC_NR(SNDRV_COMPRESS_GET_METADATA): retval = snd_compr_get_metadata(stream, arg); break; - case _IOC_NR(SNDRV_COMPRESS_TSTAMP): - retval = snd_compr_tstamp(stream, arg); - break; - case _IOC_NR(SNDRV_COMPRESS_AVAIL): - retval = snd_compr_ioctl_avail(stream, arg); - break; + case _IOC_NR(SNDRV_COMPRESS_PAUSE): retval = snd_compr_pause(stream); break; + case _IOC_NR(SNDRV_COMPRESS_RESUME): retval = snd_compr_resume(stream); break; + case _IOC_NR(SNDRV_COMPRESS_START): retval = snd_compr_start(stream); break; + case _IOC_NR(SNDRV_COMPRESS_STOP): retval = snd_compr_stop(stream); break; - case _IOC_NR(SNDRV_COMPRESS_DRAIN): - retval = snd_compr_drain(stream); - break; - case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN): - retval = snd_compr_partial_drain(stream); - break; + case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK): retval = snd_compr_next_track(stream); break; + case _IOC_NR(SNDRV_COMPRESS_SET_NEXT_TRACK_PARAM): + retval = snd_compr_set_next_track_param(stream, arg); + break; + + default: + mutex_unlock(&stream->device->lock); + return snd_compress_simple_ioctls(f, stream, cmd, arg); + } mutex_unlock(&stream->device->lock); return retval; diff --git a/sound/core/jack.c b/sound/core/jack.c index 84c2a17c56ee3d645433358febe9a444d1fc5c41..3a25295adb93983ba5cb6d8fe5b1fc98ac038751 100644 --- a/sound/core/jack.c +++ b/sound/core/jack.c @@ -33,13 +33,17 @@ struct snd_jack_kctl { }; #ifdef CONFIG_SND_JACK_INPUT_DEV -static int jack_switch_types[SND_JACK_SWITCH_TYPES] = { +static int jack_switch_types[] = { SW_HEADPHONE_INSERT, SW_MICROPHONE_INSERT, SW_LINEOUT_INSERT, SW_JACK_PHYSICAL_INSERT, SW_VIDEOOUT_INSERT, SW_LINEIN_INSERT, + SW_HPHL_OVERCURRENT, + 
SW_HPHR_OVERCURRENT, + SW_UNSUPPORT_INSERT, + SW_MICROPHONE2_INSERT, }; #endif /* CONFIG_SND_JACK_INPUT_DEV */ @@ -250,7 +254,7 @@ int snd_jack_new(struct snd_card *card, const char *id, int type, jack->type = type; - for (i = 0; i < SND_JACK_SWITCH_TYPES; i++) + for (i = 0; i < ARRAY_SIZE(jack_switch_types); i++) if (type & (1 << i)) input_set_capability(jack->input_dev, EV_SW, jack_switch_types[i]); diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c index 732bbede7ebfd54f3fa105c1ace3109a4ce346b9..da400da1fafe6758d3bda4e004a980c52756c048 100644 --- a/sound/core/oss/pcm_plugin.c +++ b/sound/core/oss/pcm_plugin.c @@ -196,7 +196,9 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin) return 0; } -snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t drv_frames) +static snd_pcm_sframes_t plug_client_size(struct snd_pcm_substream *plug, + snd_pcm_uframes_t drv_frames, + bool check_size) { struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next; int stream; @@ -209,21 +211,23 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p if (stream == SNDRV_PCM_STREAM_PLAYBACK) { plugin = snd_pcm_plug_last(plug); while (plugin && drv_frames > 0) { - if (drv_frames > plugin->buf_frames) - drv_frames = plugin->buf_frames; plugin_prev = plugin->prev; if (plugin->src_frames) drv_frames = plugin->src_frames(plugin, drv_frames); + if (check_size && plugin->buf_frames && + drv_frames > plugin->buf_frames) + drv_frames = plugin->buf_frames; plugin = plugin_prev; } } else if (stream == SNDRV_PCM_STREAM_CAPTURE) { plugin = snd_pcm_plug_first(plug); while (plugin && drv_frames > 0) { plugin_next = plugin->next; + if (check_size && plugin->buf_frames && + drv_frames > plugin->buf_frames) + drv_frames = plugin->buf_frames; if (plugin->dst_frames) drv_frames = plugin->dst_frames(plugin, drv_frames); - if (drv_frames > plugin->buf_frames) - drv_frames = plugin->buf_frames; plugin = plugin_next; } } else @@ -231,7 +235,9 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p return drv_frames; } -snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t clt_frames) +static snd_pcm_sframes_t plug_slave_size(struct snd_pcm_substream *plug, + snd_pcm_uframes_t clt_frames, + bool check_size) { struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next; snd_pcm_sframes_t frames; @@ -247,26 +253,28 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc plugin = snd_pcm_plug_first(plug); while (plugin && frames > 0) { plugin_next = plugin->next; + if (check_size && plugin->buf_frames && + frames > plugin->buf_frames) + frames = plugin->buf_frames; if (plugin->dst_frames) { frames = plugin->dst_frames(plugin, frames); if (frames < 0) return frames; } - if (frames > plugin->buf_frames) - frames = plugin->buf_frames; plugin = plugin_next; } } else if (stream == SNDRV_PCM_STREAM_CAPTURE) { plugin = snd_pcm_plug_last(plug); while (plugin) { - if (frames > plugin->buf_frames) - frames = plugin->buf_frames; plugin_prev = plugin->prev; if (plugin->src_frames) { frames = plugin->src_frames(plugin, frames); if (frames < 0) return frames; } + if (check_size && plugin->buf_frames && + frames > plugin->buf_frames) + frames = plugin->buf_frames; plugin = plugin_prev; } } else @@ -274,6 +282,18 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc return frames; } +snd_pcm_sframes_t snd_pcm_plug_client_size(struct 
snd_pcm_substream *plug, + snd_pcm_uframes_t drv_frames) +{ + return plug_client_size(plug, drv_frames, false); +} + +snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, + snd_pcm_uframes_t clt_frames) +{ + return plug_slave_size(plug, clt_frames, false); +} + static int snd_pcm_plug_formats(const struct snd_mask *mask, snd_pcm_format_t format) { @@ -630,7 +650,7 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st src_channels = dst_channels; plugin = next; } - return snd_pcm_plug_client_size(plug, frames); + return plug_client_size(plug, frames, true); } snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, struct snd_pcm_plugin_channel *dst_channels_final, snd_pcm_uframes_t size) @@ -640,7 +660,7 @@ snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, str snd_pcm_sframes_t frames = size; int err; - frames = snd_pcm_plug_slave_size(plug, frames); + frames = plug_slave_size(plug, frames, true); if (frames < 0) return frames; diff --git a/sound/isa/opti9xx/miro.c b/sound/isa/opti9xx/miro.c index c6136c6b021474dabdd30b023fd9fb8de0d042c9..81eb9c89428d10ce157a943f4cb35c97f20b3a2f 100644 --- a/sound/isa/opti9xx/miro.c +++ b/sound/isa/opti9xx/miro.c @@ -880,10 +880,13 @@ static void snd_miro_write(struct snd_miro *chip, unsigned char reg, spin_unlock_irqrestore(&chip->lock, flags); } +static inline void snd_miro_write_mask(struct snd_miro *chip, + unsigned char reg, unsigned char value, unsigned char mask) +{ + unsigned char oldval = snd_miro_read(chip, reg); -#define snd_miro_write_mask(chip, reg, value, mask) \ - snd_miro_write(chip, reg, \ - (snd_miro_read(chip, reg) & ~(mask)) | ((value) & (mask))) + snd_miro_write(chip, reg, (oldval & ~mask) | (value & mask)); +} /* * Proc Interface diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c index ac0ab6eb40f07ea8f1a857806818f69733c42deb..0d0fa6f54bddefc9c8b0b36fb95a15b206d2a3b9 100644 --- a/sound/isa/opti9xx/opti92x-ad1848.c +++ b/sound/isa/opti9xx/opti92x-ad1848.c @@ -329,10 +329,13 @@ static void snd_opti9xx_write(struct snd_opti9xx *chip, unsigned char reg, } -#define snd_opti9xx_write_mask(chip, reg, value, mask) \ - snd_opti9xx_write(chip, reg, \ - (snd_opti9xx_read(chip, reg) & ~(mask)) | ((value) & (mask))) +static inline void snd_opti9xx_write_mask(struct snd_opti9xx *chip, + unsigned char reg, unsigned char value, unsigned char mask) +{ + unsigned char oldval = snd_opti9xx_read(chip, reg); + snd_opti9xx_write(chip, reg, (oldval & ~mask) | (value & mask)); +} static int snd_opti9xx_configure(struct snd_opti9xx *chip, long port, diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c index 066b5b59c4d7cc50c7c50de2a2f4d20cc07cf166..0224011a240fb620e7387961a7bf5923fb2cc44e 100644 --- a/sound/pci/hda/hda_beep.c +++ b/sound/pci/hda/hda_beep.c @@ -297,8 +297,12 @@ int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol, { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct hda_beep *beep = codec->beep; + int chs = get_amp_channels(kcontrol); + if (beep && (!beep->enabled || !ctl_has_mute(kcontrol))) { - ucontrol->value.integer.value[0] = + if (chs & 1) + ucontrol->value.integer.value[0] = beep->enabled; + if (chs & 2) ucontrol->value.integer.value[1] = beep->enabled; return 0; } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 3ee5b7b9b595319bac92a7e8ba4e1a81165d3798..8e1eb5f243a278676408b3e6867d7b6859f7d4a8 100644 --- a/sound/pci/hda/hda_intel.c +++ 
b/sound/pci/hda/hda_intel.c @@ -2054,7 +2054,7 @@ static int azx_first_init(struct azx *chip) /* codec detection */ if (!azx_bus(chip)->codec_mask) { dev_err(card->dev, "no codecs found!\n"); - return -ENODEV; + /* keep running the rest for the runtime PM */ } if (azx_acquire_irq(chip, 0) < 0) @@ -2076,24 +2076,15 @@ static void azx_firmware_cb(const struct firmware *fw, void *context) { struct snd_card *card = context; struct azx *chip = card->private_data; - struct pci_dev *pci = chip->pci; - if (!fw) { - dev_err(card->dev, "Cannot load firmware, aborting\n"); - goto error; - } - - chip->fw = fw; + if (fw) + chip->fw = fw; + else + dev_err(card->dev, "Cannot load firmware, continue without patching\n"); if (!chip->disabled) { /* continue probing */ - if (azx_probe_continue(chip)) - goto error; + azx_probe_continue(chip); } - return; /* OK */ - - error: - snd_card_free(card); - pci_set_drvdata(pci, NULL); } #endif @@ -2219,6 +2210,17 @@ static const struct hdac_io_ops pci_hda_io_ops = { .dma_free_pages = dma_free_pages, }; +/* Blacklist for skipping the whole probe: + * some HD-audio PCI entries are exposed without any codecs, and such devices + * should be ignored from the beginning. + */ +static const struct pci_device_id driver_blacklist[] = { + { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1043, 0x874f) }, /* ASUS ROG Zenith II / Strix */ + { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb59) }, /* MSI TRX40 Creator */ + { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb60) }, /* MSI TRX40 */ + {} +}; + static const struct hda_controller_ops pci_hda_ops = { .disable_msi_reset_irq = disable_msi_reset_irq, .substream_alloc_pages = substream_alloc_pages, @@ -2238,6 +2240,11 @@ static int azx_probe(struct pci_dev *pci, bool schedule_probe; int err; + if (pci_match_id(driver_blacklist, pci)) { + dev_info(&pci->dev, "Skipping the blacklisted device\n"); + return -ENODEV; + } + if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { @@ -2434,9 +2441,11 @@ static int azx_probe_continue(struct azx *chip) #endif /* create codec instances */ - err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]); - if (err < 0) - goto out_free; + if (bus->codec_mask) { + err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]); + if (err < 0) + goto out_free; + } #ifdef CONFIG_SND_HDA_PATCH_LOADER if (chip->fw) { @@ -2450,7 +2459,7 @@ static int azx_probe_continue(struct azx *chip) #endif } #endif - if ((probe_only[dev] & 1) == 0) { + if (bus->codec_mask && !(probe_only[dev] & 1)) { err = azx_codec_configure(chip); if (err < 0) goto out_free; @@ -2467,8 +2476,11 @@ static int azx_probe_continue(struct azx *chip) set_default_power_save(chip); - if (azx_has_pm_runtime(chip)) + if (azx_has_pm_runtime(chip)) { + pm_runtime_use_autosuspend(&pci->dev); + pm_runtime_allow(&pci->dev); pm_runtime_put_autosuspend(&pci->dev); + } out_free: if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index bc4edc5607c71f8ec32eb88de3d52a836837c749..c9f3c002bd553483227a1c8ced327f1bad30abdf 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -1069,6 +1069,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = { SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI), + SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI), SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D), {} }; diff 
--git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index c67fadd5aae533af205e1cc21fef65a1835f3f18..12a064f994b1a1da97dfb5b18b8cf95e2516e84c 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1848,8 +1848,10 @@ static bool check_non_pcm_per_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) /* Add sanity check to pass klockwork check. * This should never happen. */ - if (WARN_ON(spdif == NULL)) + if (WARN_ON(spdif == NULL)) { + mutex_unlock(&codec->spdif_mutex); return true; + } non_pcm = !!(spdif->status & IEC958_AES0_NONAUDIO); mutex_unlock(&codec->spdif_mutex); return non_pcm; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 23aab2fdac46851e7ffba415016ac0a6cc25a7ce..fc39550f6c5d5e089b874f2528d0160e062083f5 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -379,7 +379,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0215: case 0x10ec0233: case 0x10ec0235: + case 0x10ec0236: + case 0x10ec0245: case 0x10ec0255: + case 0x10ec0256: case 0x10ec0257: case 0x10ec0282: case 0x10ec0283: @@ -391,11 +394,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0300: alc_update_coef_idx(codec, 0x10, 1<<9, 0); break; - case 0x10ec0236: - case 0x10ec0256: - alc_write_coef_idx(codec, 0x36, 0x5757); - alc_update_coef_idx(codec, 0x10, 1<<9, 0); - break; case 0x10ec0275: alc_update_coef_idx(codec, 0xe, 0, 1<<0); break; @@ -804,9 +802,11 @@ static void alc_ssid_check(struct hda_codec *codec, const hda_nid_t *ports) { if (!alc_subsystem_id(codec, ports)) { struct alc_spec *spec = codec->spec; - codec_dbg(codec, - "realtek: Enable default setup for auto mode as fallback\n"); - spec->init_amp = ALC_INIT_DEFAULT; + if (spec->init_amp == ALC_INIT_UNDEFINED) { + codec_dbg(codec, + "realtek: Enable default setup for auto mode as fallback\n"); + spec->init_amp = ALC_INIT_DEFAULT; + } } } @@ -2444,6 +2444,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), @@ -3249,7 +3250,13 @@ static void alc256_init(struct hda_codec *codec) alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */ alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 1 << 15); /* Clear bit */ alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15); - alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/ + /* + * Expose headphone mic (or possibly Line In on some machines) instead + * of PC Beep on 1Ah, and disable 1Ah loopback for all outputs. See + * Documentation/sound/hd-audio/realtek-pc-beep.rst for details of + * this register. 
+ */ + alc_write_coef_idx(codec, 0x36, 0x5757); } static void alc256_shutup(struct hda_codec *codec) @@ -5269,17 +5276,6 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec, } } -static void alc256_fixup_dell_xps_13_headphone_noise2(struct hda_codec *codec, - const struct hda_fixup *fix, - int action) -{ - if (action != HDA_FIXUP_ACT_PRE_PROBE) - return; - - snd_hda_codec_amp_stereo(codec, 0x1a, HDA_INPUT, 0, HDA_AMP_VOLMASK, 1); - snd_hda_override_wcaps(codec, 0x1a, get_wcaps(codec, 0x1a) & ~AC_WCAP_IN_AMP); -} - static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -5671,8 +5667,6 @@ enum { ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, ALC275_FIXUP_DELL_XPS, - ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, - ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2, ALC293_FIXUP_LENOVO_SPK_NOISE, ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, ALC255_FIXUP_DELL_SPK_NOISE, @@ -6384,23 +6378,6 @@ static const struct hda_fixup alc269_fixups[] = { {} } }, - [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = { - .type = HDA_FIXUP_VERBS, - .v.verbs = (const struct hda_verb[]) { - /* Disable pass-through path for FRONT 14h */ - {0x20, AC_VERB_SET_COEF_INDEX, 0x36}, - {0x20, AC_VERB_SET_PROC_COEF, 0x1737}, - {} - }, - .chained = true, - .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE - }, - [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2] = { - .type = HDA_FIXUP_FUNC, - .v.func = alc256_fixup_dell_xps_13_headphone_noise2, - .chained = true, - .chain_id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE - }, [ALC293_FIXUP_LENOVO_SPK_NOISE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_aamix, @@ -6868,17 +6845,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), - SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2), SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP), - SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2), SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME), SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3), SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2), SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), @@ -7017,6 +6991,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), + SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre 
Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), @@ -7229,7 +7204,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc298-dell1"}, {.id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, .name = "alc298-dell-aio"}, {.id = ALC275_FIXUP_DELL_XPS, .name = "alc275-dell-xps"}, - {.id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, .name = "alc256-dell-xps13"}, {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"}, {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"}, {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"}, @@ -7820,6 +7794,7 @@ static int patch_alc269(struct hda_codec *codec) spec->gen.mixer_nid = 0; break; case 0x10ec0215: + case 0x10ec0245: case 0x10ec0285: case 0x10ec0289: spec->codec_variant = ALC269_TYPE_ALC215; @@ -8941,6 +8916,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269), HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269), HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0245, "ALC245", patch_alc269), HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269), HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269), HDA_CODEC_ENTRY(0x10ec0257, "ALC257", patch_alc269), diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c index c97b5528e4b80b421d3cb322a0dbdc883c0bc908..317bbb725b299aacda6f962ba2deea3dcf59f402 100644 --- a/sound/pci/ice1712/prodigy_hifi.c +++ b/sound/pci/ice1712/prodigy_hifi.c @@ -550,7 +550,7 @@ static int wm_adc_mux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); mutex_lock(&ice->gpio_mutex); - ucontrol->value.integer.value[0] = wm_get(ice, WM_ADC_MUX) & 0x1f; + ucontrol->value.enumerated.item[0] = wm_get(ice, WM_ADC_MUX) & 0x1f; mutex_unlock(&ice->gpio_mutex); return 0; } @@ -564,7 +564,7 @@ static int wm_adc_mux_enum_put(struct snd_kcontrol *kcontrol, mutex_lock(&ice->gpio_mutex); oval = wm_get(ice, WM_ADC_MUX); - nval = (oval & 0xe0) | ucontrol->value.integer.value[0]; + nval = (oval & 0xe0) | ucontrol->value.enumerated.item[0]; if (nval != oval) { wm_put(ice, WM_ADC_MUX, nval); change = 1; diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c index be2473166bfafcc64bb0ea5c8fbdb3299c40a193..4594b1447900aa740970c80247245b5e6d4b8dd0 100644 --- a/sound/soc/codecs/hdac_hdmi.c +++ b/sound/soc/codecs/hdac_hdmi.c @@ -148,14 +148,14 @@ static struct hdac_hdmi_pcm * hdac_hdmi_get_pcm_from_cvt(struct hdac_hdmi_priv *hdmi, struct hdac_hdmi_cvt *cvt) { - struct hdac_hdmi_pcm *pcm = NULL; + struct hdac_hdmi_pcm *pcm; list_for_each_entry(pcm, &hdmi->pcm_list, head) { if (pcm->cvt == cvt) - break; + return pcm; } - return pcm; + return NULL; } static void hdac_hdmi_jack_report(struct hdac_hdmi_pcm *pcm, diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index 896412d11a31c4a6e6ec02ac34add7d52fac3676..7c0a06b487f74609783bdfa792c5d111d7009068 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -1633,6 +1633,40 @@ static int sgtl5000_i2c_probe(struct i2c_client *client, dev_err(&client->dev, "Error %d initializing CHIP_CLK_CTRL\n", ret); + /* Mute everything to avoid pop from the following power-up */ + ret = regmap_write(sgtl5000->regmap, 
SGTL5000_CHIP_ANA_CTRL, + SGTL5000_CHIP_ANA_CTRL_DEFAULT); + if (ret) { + dev_err(&client->dev, + "Error %d muting outputs via CHIP_ANA_CTRL\n", ret); + goto disable_clk; + } + + /* + * If VAG is powered-on (e.g. from previous boot), it would be disabled + * by the write to ANA_POWER in later steps of the probe code. This + * may create a loud pop even with all outputs muted. The proper way + * to circumvent this is disabling the bit first and waiting the proper + * cool-down time. + */ + ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ANA_POWER, &value); + if (ret) { + dev_err(&client->dev, "Failed to read ANA_POWER: %d\n", ret); + goto disable_clk; + } + if (value & SGTL5000_VAG_POWERUP) { + ret = regmap_update_bits(sgtl5000->regmap, + SGTL5000_CHIP_ANA_POWER, + SGTL5000_VAG_POWERUP, + 0); + if (ret) { + dev_err(&client->dev, "Error %d disabling VAG\n", ret); + goto disable_clk; + } + + msleep(SGTL5000_VAG_POWERDOWN_DELAY); + } + /* Follow section 2.2.1.1 of AN3663 */ ana_pwr = SGTL5000_ANA_POWER_DEFAULT; if (sgtl5000->num_supplies <= VDDD) { diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h index 18cae08bbd3a6f455d7ed763fbc7d44411498675..066517e352a701f5b57ea8ce0a566b707818e17d 100644 --- a/sound/soc/codecs/sgtl5000.h +++ b/sound/soc/codecs/sgtl5000.h @@ -233,6 +233,7 @@ /* * SGTL5000_CHIP_ANA_CTRL */ +#define SGTL5000_CHIP_ANA_CTRL_DEFAULT 0x0133 #define SGTL5000_LINE_OUT_MUTE 0x0100 #define SGTL5000_HP_SEL_MASK 0x0040 #define SGTL5000_HP_SEL_SHIFT 6 diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c index ca2dfe12344ecf22cc576c06c375bcc286602af7..354f17a2fa5cf5584836d3ed78c6c1cd6f873ec7 100644 --- a/sound/soc/codecs/tas571x.c +++ b/sound/soc/codecs/tas571x.c @@ -824,8 +824,10 @@ static int tas571x_i2c_probe(struct i2c_client *client, priv->regmap = devm_regmap_init(dev, NULL, client, priv->chip->regmap_config); - if (IS_ERR(priv->regmap)) - return PTR_ERR(priv->regmap); + if (IS_ERR(priv->regmap)) { + ret = PTR_ERR(priv->regmap); + goto disable_regs; + } priv->pdn_gpio = devm_gpiod_get_optional(dev, "pdn", GPIOD_OUT_LOW); if (IS_ERR(priv->pdn_gpio)) { @@ -849,7 +851,7 @@ static int tas571x_i2c_probe(struct i2c_client *client, ret = regmap_write(priv->regmap, TAS571X_OSC_TRIM_REG, 0); if (ret) - return ret; + goto disable_regs; usleep_range(50000, 60000); @@ -865,12 +867,20 @@ static int tas571x_i2c_probe(struct i2c_client *client, */ ret = regmap_update_bits(priv->regmap, TAS571X_MVOL_REG, 1, 0); if (ret) - return ret; + goto disable_regs; } - return devm_snd_soc_register_component(&client->dev, + ret = devm_snd_soc_register_component(&client->dev, &priv->component_driver, &tas571x_dai, 1); + if (ret) + goto disable_regs; + + return ret; + +disable_regs: + regulator_bulk_disable(priv->chip->num_supply_names, priv->supplies); + return ret; } static int tas571x_i2c_remove(struct i2c_client *client) diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index 8dc1f3d6a988e88fbb76c42b44e0e3b280d8a86f..c4c00297ada6e2c8671979a40efff7f9ece6454a 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c @@ -863,8 +863,7 @@ static int wm8960_hw_params(struct snd_pcm_substream *substream, wm8960->is_stream_in_use[tx] = true; - if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_ON && - !wm8960->is_stream_in_use[!tx]) + if (!wm8960->is_stream_in_use[!tx]) return wm8960_configure_clocking(component); return 0; diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c index 
3672d36b4b66f4fcd62ed863ef7a8b44ac9b0c4b..a1d7f93a0805966ebea223df507fde9a06615186 100644 --- a/sound/soc/intel/atom/sst-atom-controls.c +++ b/sound/soc/intel/atom/sst-atom-controls.c @@ -974,7 +974,9 @@ static int sst_set_be_modules(struct snd_soc_dapm_widget *w, dev_dbg(c->dev, "Enter: widget=%s\n", w->name); if (SND_SOC_DAPM_EVENT_ON(event)) { + mutex_lock(&drv->lock); ret = sst_send_slot_map(drv); + mutex_unlock(&drv->lock); if (ret) return ret; ret = sst_send_pipe_module_params(w, k); @@ -1341,7 +1343,7 @@ int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute) dai->capture_widget->name); w = dai->capture_widget; snd_soc_dapm_widget_for_each_source_path(w, p) { - if (p->connected && !p->connected(w, p->sink)) + if (p->connected && !p->connected(w, p->source)) continue; if (p->connect && p->source->power && diff --git a/sound/soc/intel/atom/sst-mfld-platform-compress.c b/sound/soc/intel/atom/sst-mfld-platform-compress.c index 6a44b19423cfe0943c86ca670bfea6c8f7538e1e..4bcd1e34c7d13ef586d076b8ce106110318a6fac 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-compress.c +++ b/sound/soc/intel/atom/sst-mfld-platform-compress.c @@ -214,8 +214,8 @@ static int sst_platform_compr_pointer(struct snd_compr_stream *cstream, stream = cstream->runtime->private_data; stream->compr_ops->tstamp(sst->dev, stream->id, tstamp); - tstamp->byte_offset = tstamp->copied_total % - (u32)cstream->runtime->buffer_size; + div_u64_rem(tstamp->copied_total, (u32)cstream->runtime->buffer_size, + &tstamp->byte_offset); pr_debug("calc bytes offset/copied bytes as %d\n", tstamp->byte_offset); return 0; } diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c index 5455d6e0ab53cc60271c90f16afaed3689bdf0fd..dc1160f79dc2ee65e1c522b4f932135279b86b2e 100644 --- a/sound/soc/intel/atom/sst/sst_drv_interface.c +++ b/sound/soc/intel/atom/sst/sst_drv_interface.c @@ -377,7 +377,7 @@ static int sst_cdev_tstamp(struct device *dev, unsigned int str_id, tstamp->sampling_rate = fw_tstamp.sampling_frequency; dev_dbg(dev, "PCM = %u\n", tstamp->pcm_io_frames); - dev_dbg(dev, "Ptr Query on strid = %d copied_total %d, decodec %d\n", + dev_dbg(dev, "Ptr Query on strid = %d copied_total %llu, decodec %d\n", str_id, tstamp->copied_total, tstamp->pcm_frames); dev_dbg(dev, "rendered %d\n", tstamp->pcm_io_frames); diff --git a/sound/soc/intel/atom/sst/sst_pci.c b/sound/soc/intel/atom/sst/sst_pci.c index 6906ee624cf6f4e16caa6456fdad8e80ed03cd4e..438c7bcd8c4cda40616566f6979dd402d44ab5c6 100644 --- a/sound/soc/intel/atom/sst/sst_pci.c +++ b/sound/soc/intel/atom/sst/sst_pci.c @@ -107,7 +107,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx) dev_dbg(ctx->dev, "DRAM Ptr %p\n", ctx->dram); do_release_regions: pci_release_regions(pci); - return 0; + return ret; } /* diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index e58240e18b301b45d8fec568dd3ca1c0e437ba9d..f29014a7d67231e3b8b77ba45df5593c3dc45465 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -588,6 +588,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF1 | BYT_RT5640_MCLK_EN), }, + { + /* MPMAN MPWIN895CL */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MPMAN"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MPWIN8900CL"), + }, + .driver_data = (void *)(BYTCR_INPUT_DEFAULTS | + BYT_RT5640_MONO_SPEAKER | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { /* MSI S100 tablet */ .matches = { 
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."), diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c index 99394c03699819343d351a932ec84799001e7d2d..e099c0505b765210c7f1ff02380dff4dc513da33 100644 --- a/sound/soc/jz4740/jz4740-i2s.c +++ b/sound/soc/jz4740/jz4740-i2s.c @@ -92,7 +92,7 @@ #define JZ_AIC_I2S_STATUS_BUSY BIT(2) #define JZ_AIC_CLK_DIV_MASK 0xf -#define I2SDIV_DV_SHIFT 8 +#define I2SDIV_DV_SHIFT 0 #define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT) #define I2SDIV_IDV_SHIFT 8 #define I2SDIV_IDV_MASK (0xf << I2SDIV_IDV_SHIFT) diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c index 8f6c8fc073a93048cb2e33d8439105ac2dc7d025..1fc1939b90c2f790626601d16e8b9ac1c13f38dd 100644 --- a/sound/soc/qcom/qdsp6/q6afe-dai.c +++ b/sound/soc/qcom/qdsp6/q6afe-dai.c @@ -899,6 +899,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -914,6 +916,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -928,6 +932,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -943,6 +949,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -957,6 +965,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -972,6 +982,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -986,6 +998,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, @@ -1001,6 +1015,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = { SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + .channels_min = 1, + .channels_max = 8, .rate_min = 8000, .rate_max = 48000, }, diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index 33dc8d6ad35b2887d4b841d112b3759646c25c1a..a6cf2ac223e42c1b0a2067f5e6c66d78fd246133 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c @@ -566,10 +566,16 @@ static int rsnd_ssi_stop(struct rsnd_mod *mod, * Capture: It might not receave data. Do nothing */ if (rsnd_io_is_play(io)) { - rsnd_mod_write(mod, SSICR, cr | EN); + rsnd_mod_write(mod, SSICR, cr | ssi->cr_en); rsnd_ssi_status_check(mod, DIRQ); } + /* In multi-SSI mode, stop is performed by setting ssi0129 in + * SSI_CONTROL to 0 (in rsnd_ssio_stop_gen2). Do nothing here. 
+ */ + if (rsnd_ssi_multi_slaves_runtime(io)) + return 0; + /* * disable SSI, * and, wait idle state @@ -674,6 +680,9 @@ static void rsnd_ssi_parent_attach(struct rsnd_mod *mod, if (!rsnd_rdai_is_clk_master(rdai)) return; + if (rsnd_ssi_is_multi_slave(mod, io)) + return; + switch (rsnd_mod_id(mod)) { case 1: case 2: diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c index 016fbf5ac242cc1137fa8957641ef5f50b886263..7b5eb316c3665c386fef326609028bfe2c0f06c3 100644 --- a/sound/soc/sh/rcar/ssiu.c +++ b/sound/soc/sh/rcar/ssiu.c @@ -172,7 +172,7 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod, i; for_each_rsnd_mod_array(i, pos, io, rsnd_ssi_array) { - shift = (i * 4) + 16; + shift = (i * 4) + 20; val = (val & ~(0xF << shift)) | rsnd_mod_id(pos) << shift; } diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index 409d082e80d15b75576ce7be3501183ce0370547..4e6e9b6463b0a8c999f88d179a720031d23fd7eb 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c @@ -774,6 +774,28 @@ static int soc_compr_copy(struct snd_compr_stream *cstream, return ret; } +static int sst_compr_set_next_track_param(struct snd_compr_stream *cstream, + union snd_codec_options *codec_options) +{ + struct snd_soc_pcm_runtime *rtd = cstream->private_data; + struct snd_soc_component *component; + struct snd_soc_rtdcom_list *rtdcom; + int ret = 0; + + for_each_rtdcom(rtd, rtdcom) { + component = rtdcom->component; + + if (component->driver->compr_ops && + component->driver->compr_ops->set_next_track_param) + ret = + component->driver->compr_ops->set_next_track_param( + cstream, codec_options); + } + + return ret; +} + + static int soc_compr_set_metadata(struct snd_compr_stream *cstream, struct snd_compr_metadata *metadata) { @@ -840,6 +862,7 @@ static struct snd_compr_ops soc_compr_ops = { .free = soc_compr_free, .set_params = soc_compr_set_params, .set_metadata = soc_compr_set_metadata, + .set_next_track_param = sst_compr_set_next_track_param, .get_metadata = soc_compr_get_metadata, .get_params = soc_compr_get_params, .trigger = soc_compr_trigger, @@ -856,6 +879,7 @@ static struct snd_compr_ops soc_compr_dyn_ops = { .set_params = soc_compr_set_params_fe, .get_params = soc_compr_get_params, .set_metadata = soc_compr_set_metadata, + .set_next_track_param = sst_compr_set_next_track_param, .get_metadata = soc_compr_get_metadata, .trigger = soc_compr_trigger_fe, .pointer = soc_compr_pointer, diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 7fdc2f99d7f63a0cce2f6444080f6de5385f8c51..113544e9a43df9d148232118bf6f2f7f0b113589 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -2747,6 +2747,7 @@ int snd_soc_register_card(struct snd_soc_card *card) card->instantiated = 0; mutex_init(&card->mutex); mutex_init(&card->dapm_mutex); + mutex_init(&card->dapm_power_mutex); ret = snd_soc_instantiate_card(card); if (ret != 0) @@ -3731,7 +3732,7 @@ int snd_soc_info_multi_ext(struct snd_kcontrol *kcontrol, uinfo->value.integer.max = platform_max; return 0; } -EXPORT_SYMBOL(snd_soc_info_multi_ext); +EXPORT_SYMBOL_GPL(snd_soc_info_multi_ext); int snd_soc_get_dai_name(struct of_phandle_args *args, const char **dai_name) diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 39f75a802374a92367bd4600c91988a7e9c3c95b..8bc9449541bf0dd9ad3556dcf4ad93ba13cc4064 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -410,7 +410,7 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget, memset(&template, 0, sizeof(template)); template.reg = e->reg; - 
template.mask = e->mask << e->shift_l; + template.mask = e->mask; template.shift = e->shift_l; template.off_val = snd_soc_enum_item_to_val(e, 0); template.on_val = template.off_val; @@ -536,8 +536,22 @@ static bool dapm_kcontrol_set_value(const struct snd_kcontrol *kcontrol, if (data->value == value) return false; - if (data->widget) - data->widget->on_val = value; + if (data->widget) { + switch (dapm_kcontrol_get_wlist(kcontrol)->widgets[0]->id) { + case snd_soc_dapm_switch: + case snd_soc_dapm_mixer: + case snd_soc_dapm_mixer_named_ctl: + data->widget->on_val = value & data->widget->mask; + break; + case snd_soc_dapm_demux: + case snd_soc_dapm_mux: + data->widget->on_val = value >> data->widget->shift; + break; + default: + data->widget->on_val = value; + break; + } + } data->value = value; @@ -792,7 +806,13 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i, val = max - val; p->connect = !!val; } else { - p->connect = 0; + /* since a virtual mixer has no backing registers to + * decide which path to connect, it will try to match + * with initial state. This is to ensure + * that the default mixer choice will be + * correctly powered up during initialization. + */ + p->connect = invert; } } @@ -1591,7 +1611,7 @@ static void dapm_seq_run(struct snd_soc_card *card, /* Do we need to apply any queued changes? */ if (sort[w->id] != cur_sort || w->reg != cur_reg || w->dapm != cur_dapm || w->subseq != cur_subseq) { - if (!list_empty(&pending)) + if (cur_dapm && !list_empty(&pending)) dapm_seq_run_coalesced(card, &pending); if (cur_dapm && cur_dapm->seq_notifier) { @@ -1654,7 +1674,7 @@ static void dapm_seq_run(struct snd_soc_card *card, "ASoC: Failed to apply widget power: %d\n", ret); } - if (!list_empty(&pending)) + if (cur_dapm && !list_empty(&pending)) dapm_seq_run_coalesced(card, &pending); if (cur_dapm && cur_dapm->seq_notifier) { @@ -1899,6 +1919,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event) lockdep_assert_held(&card->dapm_mutex); trace_snd_soc_dapm_start(card); + mutex_lock(&card->dapm_power_mutex); list_for_each_entry(d, &card->dapm_list, list) { if (dapm_idle_bias_off(d)) @@ -2018,6 +2039,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event) pop_dbg(card->dev, card->pop_time, "DAPM sequencing finished, waiting %dms\n", card->pop_time); pop_wait(card->pop_time); + mutex_unlock(&card->dapm_power_mutex); trace_snd_soc_dapm_done(card); diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index f4dc3d445aae9e128e2db6f85fac603c943398f1..95fc24580f85f9dc9ee5b8938e1be8816f1b39cd 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -832,7 +832,7 @@ int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol, unsigned int regbase = mc->regbase; unsigned int regcount = mc->regcount; unsigned int regwshift = component->val_bytes * BITS_PER_BYTE; - unsigned int regwmask = (1<<regwshift)-1; + unsigned int regwmask = (1UL<<regwshift)-1; unsigned int invert = mc->invert; unsigned long mask = (1UL<<mc->nbits)-1; long min = mc->min; @@ -881,7 +881,7 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol, unsigned int regbase = mc->regbase; unsigned int regcount = mc->regcount; unsigned int regwshift = component->val_bytes * BITS_PER_BYTE; - unsigned int regwmask = (1<<regwshift)-1; + unsigned int regwmask = (1UL<<regwshift)-1; unsigned int invert = mc->invert; unsigned long mask = (1UL<<mc->nbits)-1; long max = mc->max; diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 028157522aa6e590e9040c5d16c871de489709f6..05d53ffa143912f36b802a934c83797fd037a27a 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -2327,7 +2327,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
switch (cmd) { case SNDRV_PCM_TRIGGER_START: if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) && - (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP)) + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) && + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) continue; ret = dpcm_do_trigger(dpcm, be_substream, cmd); @@ -2357,7 +2358,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, be->dpcm[stream].state = SND_SOC_DPCM_STATE_START; break; case SNDRV_PCM_TRIGGER_STOP: - if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) + if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) && + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) continue; if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream)) diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 30fc45aa18690547407ec191a37b31ba0918bd02..2c6598e07dde3c69d47831b770000ae2288661fc 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -364,7 +364,7 @@ static int soc_tplg_add_kcontrol(struct soc_tplg *tplg, struct snd_soc_component *comp = tplg->comp; return soc_tplg_add_dcontrol(comp->card->snd_card, - comp->dev, k, NULL, comp, kcontrol); + comp->dev, k, comp->name_prefix, comp, kcontrol); } /* remove a mixer kcontrol */ @@ -1923,7 +1923,9 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg, _pcm = pcm; } else { abi_match = false; - pcm_new_ver(tplg, pcm, &_pcm); + ret = pcm_new_ver(tplg, pcm, &_pcm); + if (ret < 0) + return ret; } /* create the FE DAIs and DAI links */ diff --git a/sound/usb/card.c b/sound/usb/card.c index 746a72e23cf9f704e0acaf50a407c56f9f96f84b..4244509230618f46cd696bf6e8e83cb3c6b55594 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -119,6 +119,82 @@ static DEFINE_MUTEX(register_mutex); static struct snd_usb_audio *usb_chip[SNDRV_CARDS]; static struct usb_driver usb_audio_driver; +/** + * find_snd_usb_substream - helper API to find usb substream context + * information using card number, pcm device number and direction. + * @card_num: card number + * @pcm_idx: pcm device number + * @direction: SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE + * @uchip: substream context. + * disconnect_cb: callback to use for cleanup on disconnect. + */ +struct snd_usb_substream *find_snd_usb_substream(unsigned int card_num, + unsigned int pcm_idx, unsigned int direction, struct snd_usb_audio + **uchip, void (*disconnect_cb)(struct snd_usb_audio *chip)) +{ + int idx; + struct snd_usb_stream *as; + struct snd_usb_substream *subs = NULL; + struct snd_usb_audio *chip = NULL; + + mutex_lock(®ister_mutex); + /* + * legacy audio snd card number assignment is dynamic. 
Hence + * search using chip->card->number + */ + for (idx = 0; idx < SNDRV_CARDS; idx++) { + if (!usb_chip[idx] || !usb_chip[idx]->card) + continue; + if (usb_chip[idx]->card->number == card_num) { + chip = usb_chip[idx]; + break; + } + } + + if (!chip || atomic_read(&chip->shutdown)) { + pr_debug("%s: instance of usb card # %d does not exist\n", + __func__, card_num); + goto err; + } + + if (pcm_idx >= chip->pcm_devs) { + usb_audio_err(chip, "%s: invalid pcm dev number %u > %d\n", + __func__, pcm_idx, chip->pcm_devs); + goto err; + } + + if (direction > SNDRV_PCM_STREAM_CAPTURE) { + usb_audio_err(chip, "%s: invalid direction %u\n", __func__, + direction); + goto err; + } + + list_for_each_entry(as, &chip->pcm_list, list) { + if (as->pcm_index == pcm_idx) { + subs = &as->substream[direction]; + if (subs->interface < 0 && !subs->data_endpoint && + !subs->sync_endpoint) { + usb_audio_err(chip, "%s: stream disconnected, bail out\n", + __func__); + subs = NULL; + goto err; + } + goto done; + } + } + +done: + chip->card_num = card_num; + chip->disconnect_cb = disconnect_cb; +err: + *uchip = chip; + if (!subs) + pr_err("%s: substream instance not found\n", __func__); + mutex_unlock(®ister_mutex); + return subs; +} +EXPORT_SYMBOL_GPL(find_snd_usb_substream); + /* * disconnect streams * called from usb_audio_disconnect() @@ -354,6 +430,7 @@ static void snd_usb_audio_free(struct snd_card *card) list_for_each_entry_safe(ep, n, &chip->ep_list, list) snd_usb_endpoint_free(ep); + mutex_destroy(&chip->dev_lock); mutex_destroy(&chip->mutex); if (!atomic_read(&chip->shutdown)) dev_set_drvdata(&chip->dev->dev, NULL); @@ -481,6 +558,7 @@ static int snd_usb_audio_create(struct usb_interface *intf, chip = card->private_data; mutex_init(&chip->mutex); + mutex_init(&chip->dev_lock); init_waitqueue_head(&chip->shutdown_wait); chip->index = idx; chip->dev = dev; diff --git a/sound/usb/card.h b/sound/usb/card.h index 7f11655bde5034cdc7528a88b5f84d8f59c838ac..1282799a8b46c05e3c09c3565be86ee5b3bbfab8 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h @@ -172,4 +172,8 @@ struct snd_usb_stream { struct list_head list; }; +struct snd_usb_substream *find_snd_usb_substream(unsigned int card_num, + unsigned int pcm_idx, unsigned int direction, struct snd_usb_audio + **uchip, void (*disconnect_cb)(struct snd_usb_audio *chip)); + #endif /* __USBAUDIO_CARD_H */ diff --git a/sound/usb/format.c b/sound/usb/format.c index 9d27429ed40362e5f2c22e096ba7b8589602ba3c..c8207b52c651c1d8c2335eeeb068dd35384e46db 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -237,6 +237,52 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof return 0; } +/* + * Many Focusrite devices supports a limited set of sampling rates per + * altsetting. Maximum rate is exposed in the last 4 bytes of Format Type + * descriptor which has a non-standard bLength = 10. 
+ */ +static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip, + struct audioformat *fp, + unsigned int rate) +{ + struct usb_interface *iface; + struct usb_host_interface *alts; + unsigned char *fmt; + unsigned int max_rate; + + iface = usb_ifnum_to_if(chip->dev, fp->iface); + if (!iface) + return true; + + alts = &iface->altsetting[fp->altset_idx]; + fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, + NULL, UAC_FORMAT_TYPE); + if (!fmt) + return true; + + if (fmt[0] == 10) { /* bLength */ + max_rate = combine_quad(&fmt[6]); + + /* Validate max rate */ + if (max_rate != 48000 && + max_rate != 96000 && + max_rate != 192000 && + max_rate != 384000) { + + usb_audio_info(chip, + "%u:%d : unexpected max rate: %u\n", + fp->iface, fp->altsetting, max_rate); + + return true; + } + + return rate <= max_rate; + } + + return true; +} + /* * Helper function to walk the array of sample rate triplets reported by * the device. The problem is that we need to parse whole array first to @@ -273,6 +319,11 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip, } for (rate = min; rate <= max; rate += res) { + /* Filter out invalid rates on Focusrite devices */ + if (USB_ID_VENDOR(chip->usb_id) == 0x1235 && + !focusrite_valid_sample_rate(chip, fp, rate)) + goto skip_rate; + if (fp->rate_table) fp->rate_table[nr_rates] = rate; if (!fp->rate_min || rate < fp->rate_min) @@ -287,6 +338,7 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip, break; } +skip_rate: /* avoid endless loop */ if (res == 0) break; diff --git a/sound/usb/helper.c b/sound/usb/helper.c index 7712e2b84183891336469d65b861cba0beb145e3..e6e7c9d384fad1d41e4f7c220cf576bcd8af16a6 100644 --- a/sound/usb/helper.c +++ b/sound/usb/helper.c @@ -75,6 +75,7 @@ void *snd_usb_find_csint_desc(void *buffer, int buflen, void *after, u8 dsubtype } return NULL; } +EXPORT_SYMBOL_GPL(snd_usb_find_csint_desc); /* * Wrapper for usb_control_msg(). 
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index f2e173b9691d3a57b88685befeb64c153f653f33..7a5c665cf4e4434e64b4912c6f1752741ef2d3b5 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1461,7 +1461,7 @@ static int mixer_ctl_connector_get(struct snd_kcontrol *kcontrol, usb_audio_err(chip, "cannot get connectors status: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n", UAC_GET_CUR, validx, idx, cval->val_type); - return ret; + return filter_error(cval, ret); } ucontrol->value.integer.value[0] = val; @@ -1765,10 +1765,16 @@ static void get_connector_control_name(struct usb_mixer_interface *mixer, /* Build a mixer control for a UAC connector control (jack-detect) */ static void build_connector_control(struct usb_mixer_interface *mixer, + const struct usbmix_name_map *imap, struct usb_audio_term *term, bool is_input) { struct snd_kcontrol *kctl; struct usb_mixer_elem_info *cval; + const struct usbmix_name_map *map; + + map = find_map(imap, term->id, 0); + if (check_ignored_ctl(map)) + return; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (!cval) @@ -1799,8 +1805,12 @@ static void build_connector_control(struct usb_mixer_interface *mixer, usb_mixer_elem_info_free(cval); return; } - get_connector_control_name(mixer, term, is_input, kctl->id.name, - sizeof(kctl->id.name)); + + if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name))) + strlcat(kctl->id.name, " Jack", sizeof(kctl->id.name)); + else + get_connector_control_name(mixer, term, is_input, kctl->id.name, + sizeof(kctl->id.name)); kctl->private_free = snd_usb_mixer_elem_free; snd_usb_mixer_add_control(&cval->head, kctl); } @@ -2107,8 +2117,9 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid, check_input_term(state, term_id, &iterm); /* Check for jack detection. 
*/ - if (uac_v2v3_control_is_readable(bmctls, control)) - build_connector_control(state->mixer, &iterm, true); + if ((iterm.type & 0xff00) != 0x0100 && + uac_v2v3_control_is_readable(bmctls, control)) + build_connector_control(state->mixer, state->map, &iterm, true); return 0; } @@ -3069,13 +3080,13 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer, memset(&iterm, 0, sizeof(iterm)); iterm.id = UAC3_BADD_IT_ID4; iterm.type = UAC_BIDIR_TERMINAL_HEADSET; - build_connector_control(mixer, &iterm, true); + build_connector_control(mixer, map->map, &iterm, true); /* Output Term - Insertion control */ memset(&oterm, 0, sizeof(oterm)); oterm.id = UAC3_BADD_OT_ID3; oterm.type = UAC_BIDIR_TERMINAL_HEADSET; - build_connector_control(mixer, &oterm, false); + build_connector_control(mixer, map->map, &oterm, false); } return 0; @@ -3104,7 +3115,8 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) if (map->id == state.chip->usb_id) { state.map = map->map; state.selector_map = map->selector_map; - mixer->ignore_ctl_error = map->ignore_ctl_error; + mixer->connector_map = map->connector_map; + mixer->ignore_ctl_error |= map->ignore_ctl_error; break; } } @@ -3147,10 +3159,11 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) if (err < 0 && err != -EINVAL) return err; - if (uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls), + if ((state.oterm.type & 0xff00) != 0x0100 && + uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls), UAC2_TE_CONNECTOR)) { - build_connector_control(state.mixer, &state.oterm, - false); + build_connector_control(state.mixer, state.map, + &state.oterm, false); } } else { /* UAC_VERSION_3 */ struct uac3_output_terminal_descriptor *desc = p; @@ -3172,10 +3185,11 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) if (err < 0 && err != -EINVAL) return err; - if (uac_v2v3_control_is_readable(le32_to_cpu(desc->bmControls), + if ((state.oterm.type & 0xff00) != 0x0100 && + uac_v2v3_control_is_readable(le32_to_cpu(desc->bmControls), UAC3_TE_INSERTION)) { - build_connector_control(state.mixer, &state.oterm, - false); + build_connector_control(state.mixer, state.map, + &state.oterm, false); } } } @@ -3183,10 +3197,32 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) return 0; } +static int delegate_notify(struct usb_mixer_interface *mixer, int unitid, + u8 *control, u8 *channel) +{ + const struct usbmix_connector_map *map = mixer->connector_map; + + if (!map) + return unitid; + + for (; map->id; map++) { + if (map->id == unitid) { + if (control && map->control) + *control = map->control; + if (channel && map->channel) + *channel = map->channel; + return map->delegated_id; + } + } + return unitid; +} + void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid) { struct usb_mixer_elem_list *list; + unitid = delegate_notify(mixer, unitid, NULL, NULL); + for_each_mixer_elem(list, mixer, unitid) { struct usb_mixer_elem_info *info = mixer_elem_list_to_info(list); @@ -3256,6 +3292,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, return; } + unitid = delegate_notify(mixer, unitid, &control, &channel); + for_each_mixer_elem(list, mixer, unitid) count++; diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index 3d12af8bf1917bbffa6b02ad38ef0c920dc5d372..15ec90e96d4d96ef1d0e084043ce59c245acb610 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h @@ -4,6 +4,13 @@ #include +struct usbmix_connector_map { + u8 id; + u8 delegated_id; + u8 
control; + u8 channel; +}; + struct usb_mixer_interface { struct snd_usb_audio *chip; struct usb_host_interface *hostif; @@ -16,6 +23,9 @@ struct usb_mixer_interface { /* the usb audio specification version this interface complies to */ int protocol; + /* optional connector delegation map */ + const struct usbmix_connector_map *connector_map; + /* Sound Blaster remote control stuff */ const struct rc_config *rc_cfg; u32 rc_code; diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c index 71069e110897a9c847df337e04919fe074426462..1689e4f242dfd0f1d8aa12526bc3f663b35db271 100644 --- a/sound/usb/mixer_maps.c +++ b/sound/usb/mixer_maps.c @@ -41,6 +41,7 @@ struct usbmix_ctl_map { u32 id; const struct usbmix_name_map *map; const struct usbmix_selector_map *selector_map; + const struct usbmix_connector_map *connector_map; int ignore_ctl_error; }; @@ -363,6 +364,43 @@ static const struct usbmix_name_map dell_alc4020_map[] = { { 0 } }; +/* Some mobos shipped with a dummy HD-audio show the invalid GET_MIN/GET_MAX + * response for Input Gain Pad (id=19, control=12) and the connector status + * for SPDIF terminal (id=18). Skip them. + */ +static const struct usbmix_name_map asus_rog_map[] = { + { 18, NULL }, /* OT, connector control */ + { 19, NULL, 12 }, /* FU, Input Gain Pad */ + {} +}; + +/* TRX40 mobos with Realtek ALC1220-VB */ +static const struct usbmix_name_map trx40_mobo_map[] = { + { 18, NULL }, /* OT, IEC958 - broken response, disabled */ + { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */ + { 16, "Speaker" }, /* OT */ + { 22, "Speaker Playback" }, /* FU */ + { 7, "Line" }, /* IT */ + { 19, "Line Capture" }, /* FU */ + { 17, "Front Headphone" }, /* OT */ + { 23, "Front Headphone Playback" }, /* FU */ + { 8, "Mic" }, /* IT */ + { 20, "Mic Capture" }, /* FU */ + { 9, "Front Mic" }, /* IT */ + { 21, "Front Mic Capture" }, /* FU */ + { 24, "IEC958 Playback" }, /* FU */ + {} +}; + +static const struct usbmix_connector_map trx40_mobo_connector_map[] = { + { 10, 16 }, /* (Back) Speaker */ + { 11, 17 }, /* Front Headphone */ + { 13, 7 }, /* Line */ + { 14, 8 }, /* Mic */ + { 15, 9 }, /* Front Mic */ + {} +}; + /* * Control map entries */ @@ -482,6 +520,29 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { .id = USB_ID(0x05a7, 0x1020), .map = bose_companion5_map, }, + { /* Gigabyte TRX40 Aorus Pro WiFi */ + .id = USB_ID(0x0414, 0xa002), + .map = trx40_mobo_map, + .connector_map = trx40_mobo_connector_map, + }, + { /* ASUS ROG Zenith II */ + .id = USB_ID(0x0b05, 0x1916), + .map = asus_rog_map, + }, + { /* ASUS ROG Strix */ + .id = USB_ID(0x0b05, 0x1917), + .map = asus_rog_map, + }, + { /* MSI TRX40 Creator */ + .id = USB_ID(0x0db0, 0x0d64), + .map = trx40_mobo_map, + .connector_map = trx40_mobo_connector_map, + }, + { /* MSI TRX40 */ + .id = USB_ID(0x0db0, 0x543d), + .map = trx40_mobo_map, + .connector_map = trx40_mobo_connector_map, + }, { 0 } /* terminator */ }; diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 10c6971cf4772c931272f37cc165573457f24ea8..983e8a3ebfcfe0203c2178f652910d8082180b9a 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -1519,11 +1519,15 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol, /* use known values for that card: interface#1 altsetting#1 */ iface = usb_ifnum_to_if(chip->dev, 1); - if (!iface || iface->num_altsetting < 2) - return -EINVAL; + if (!iface || iface->num_altsetting < 2) { + err = -EINVAL; + goto end; + } alts = &iface->altsetting[1]; - if 
(get_iface_desc(alts)->bNumEndpoints < 1) - return -EINVAL; + if (get_iface_desc(alts)->bNumEndpoints < 1) { + err = -EINVAL; + goto end; + } ep = get_endpoint(alts, 0)->bEndpointAddress; err = snd_usb_ctl_msg(chip->dev, diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 8b91be394407a33094817c6e18d3b729d086ece2..3ba1f9c5d87ac01c47c74bed7164390e8e61520b 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -149,6 +149,69 @@ static struct audioformat *find_format(struct snd_usb_substream *subs) return found; } +/* + * find a matching audio format as well as non-zero service interval + */ +static struct audioformat *find_format_and_si(struct snd_usb_substream *subs, + unsigned int datainterval) +{ + unsigned int i; + struct audioformat *fp; + struct audioformat *found = NULL; + int cur_attr = 0, attr; + + list_for_each_entry(fp, &subs->fmt_list, list) { + if (datainterval != fp->datainterval) + continue; + if (!(fp->formats & pcm_format_to_bits(subs->pcm_format))) + continue; + if (fp->channels != subs->channels) + continue; + if (subs->cur_rate < fp->rate_min || + subs->cur_rate > fp->rate_max) + continue; + if (!(fp->rates & SNDRV_PCM_RATE_CONTINUOUS)) { + for (i = 0; i < fp->nr_rates; i++) + if (fp->rate_table[i] == subs->cur_rate) + break; + if (i >= fp->nr_rates) + continue; + } + attr = fp->ep_attr & USB_ENDPOINT_SYNCTYPE; + if (!found) { + found = fp; + cur_attr = attr; + continue; + } + /* avoid async out and adaptive in if the other method + * supports the same format. + * this is a workaround for the case like + * M-audio audiophile USB. + */ + if (attr != cur_attr) { + if ((attr == USB_ENDPOINT_SYNC_ASYNC && + subs->direction == SNDRV_PCM_STREAM_PLAYBACK) || + (attr == USB_ENDPOINT_SYNC_ADAPTIVE && + subs->direction == SNDRV_PCM_STREAM_CAPTURE)) + continue; + if ((cur_attr == USB_ENDPOINT_SYNC_ASYNC && + subs->direction == SNDRV_PCM_STREAM_PLAYBACK) || + (cur_attr == USB_ENDPOINT_SYNC_ADAPTIVE && + subs->direction == SNDRV_PCM_STREAM_CAPTURE)) { + found = fp; + cur_attr = attr; + continue; + } + } + /* find the format with the largest max. packet size */ + if (fp->maxpacksize > found->maxpacksize) { + found = fp; + cur_attr = attr; + } + } + return found; +} + static int init_pitch_v1(struct snd_usb_audio *chip, int iface, struct usb_host_interface *alts, struct audioformat *fmt) @@ -580,6 +643,90 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt) return 0; } +/** + * snd_usb_enable_audio_stream - Enable/disable the specified usb substream. + * @subs: pointer to the usb substream. + * @datainterval: data packet interval. + * @enable: if true, enable the usb substream. Else disable. 
+ */ +int snd_usb_enable_audio_stream(struct snd_usb_substream *subs, + int datainterval, bool enable) +{ + struct audioformat *fmt; + struct usb_host_interface *alts; + struct usb_interface *iface; + int ret; + + if (!subs || !subs->stream) + return -EINVAL; + + if (!enable) { + if (subs->interface >= 0) { + usb_set_interface(subs->dev, subs->interface, 0); + subs->altset_idx = 0; + subs->interface = -1; + subs->cur_audiofmt = NULL; + } + + snd_usb_autosuspend(subs->stream->chip); + return 0; + } + + snd_usb_autoresume(subs->stream->chip); + if (datainterval != -EINVAL) + fmt = find_format_and_si(subs, datainterval); + else + fmt = find_format(subs); + if (!fmt) { + dev_dbg(&subs->dev->dev, + "cannot set format: format = %#x, rate = %d, channels = %d\n", + subs->pcm_format, subs->cur_rate, subs->channels); + return -EINVAL; + } + + subs->altset_idx = 0; + subs->interface = -1; + + if (!subs->stream->chip) + return -EINVAL; + + if (atomic_read(&subs->stream->chip->shutdown)) { + ret = -ENODEV; + } else { + ret = set_format(subs, fmt); + if (ret < 0) + return ret; + + if (!subs->cur_audiofmt) + return -EINVAL; + + iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface); + if (!iface) { + dev_err(&subs->dev->dev, "Could not get iface %d\n", + subs->cur_audiofmt->iface); + return -ENODEV; + } + + alts = &iface->altsetting[subs->cur_audiofmt->altset_idx]; + ret = snd_usb_init_sample_rate(subs->stream->chip, + subs->cur_audiofmt->iface, + alts, + subs->cur_audiofmt, + subs->cur_rate); + if (ret < 0) { + dev_err(&subs->dev->dev, "failed to set rate %d\n", + subs->cur_rate); + return ret; + } + } + + subs->interface = fmt->iface; + subs->altset_idx = fmt->altset_idx; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_usb_enable_audio_stream); + /* * Return the score of matching two audioformats. 
* Veto the audioformat if: diff --git a/sound/usb/pcm.h b/sound/usb/pcm.h index 9833627c1eca5f0afe01b2aaab2ce003acc89d46..37fbcb3c314a534d69d7858a3ec9e476ea55158b 100644 --- a/sound/usb/pcm.h +++ b/sound/usb/pcm.h @@ -13,6 +13,6 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface, struct usb_host_interface *alts, struct audioformat *fmt); void snd_usb_preallocate_buffer(struct snd_usb_substream *subs); - - +int snd_usb_enable_audio_stream(struct snd_usb_substream *subs, + int datainterval, bool enable); #endif /* __USBAUDIO_PCM_H */ diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 90d4f61cc2308619164b284813c7ee1d4fe57f8b..774aeedde071761e0a4458486220776f6534c0b6 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -3400,4 +3400,18 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), } }, +#define ALC1220_VB_DESKTOP(vend, prod) { \ + USB_DEVICE(vend, prod), \ + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { \ + .vendor_name = "Realtek", \ + .product_name = "ALC1220-VB-DT", \ + .profile_name = "Realtek-ALC1220-VB-Desktop", \ + .ifnum = QUIRK_NO_INTERFACE \ + } \ +} +ALC1220_VB_DESKTOP(0x0414, 0xa002), /* Gigabyte TRX40 Aorus Pro WiFi */ +ALC1220_VB_DESKTOP(0x0db0, 0x0d64), /* MSI TRX40 Creator */ +ALC1220_VB_DESKTOP(0x0db0, 0x543d), /* MSI TRX40 */ +#undef ALC1220_VB_DESKTOP + #undef USB_DEVICE_VENDOR_SPEC diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 5bbfd7577b33e521591385f9b87927f8993aada8..6836f827965c12f05850bea5661d82453bacf307 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1385,7 +1385,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */ case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */ - case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */ + case USB_ID(0x16d0, 0x06b2): /* NuPrime DAC-10 */ case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */ case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */ case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */ diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 9d020bd0de17fbc3e82128a3e7876ca23158dc1d..c414dffa55f07109bd29da49ac3098d91fabd806 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -72,9 +72,14 @@ static void snd_usb_audio_stream_free(struct snd_usb_stream *stream) static void snd_usb_audio_pcm_free(struct snd_pcm *pcm) { struct snd_usb_stream *stream = pcm->private_data; + struct snd_usb_audio *chip; + if (stream) { + mutex_lock(&stream->chip->dev_lock); + chip = stream->chip; stream->pcm = NULL; snd_usb_audio_stream_free(stream); + mutex_unlock(&chip->dev_lock); } } diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index 4a0a5a071d5160ab408fefa1d6b928afc3aed3c0..e6f0db7dc0fd39ee1d03439907fd4d7539a3ee5a 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -66,6 +66,9 @@ struct snd_usb_audio { */ struct usb_host_interface *ctrl_intf; /* the audio control interface */ + struct mutex dev_lock; /* to protect any race with disconnect */ + int card_num; /* cache pcm card number to use upon disconnect */ + void (*disconnect_cb)(struct snd_usb_audio *chip); /* callback to cleanup on disconnect */ }; #define usb_audio_err(chip, fmt, args...) 
\ diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c index 2b833054e3b0d04236472e0df44ce2eef1a3fe97..bdb28e0229e6f22331b44c31ae0eefbe812ed478 100644 --- a/sound/usb/usx2y/usbusx2yaudio.c +++ b/sound/usb/usx2y/usbusx2yaudio.c @@ -695,6 +695,8 @@ static int usX2Y_rate_set(struct usX2Ydev *usX2Y, int rate) us->submitted = 2*NOOF_SETRATE_URBS; for (i = 0; i < NOOF_SETRATE_URBS; ++i) { struct urb *urb = us->urb[i]; + if (!urb) + continue; if (urb->status) { if (!err) err = -ENODEV; diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c index 8cb504d3038447fe4cbb2f033d6002e8b714b252..5ef1c15e88ad2e3d4d4483a6a31c88770b939810 100644 --- a/tools/accounting/getdelays.c +++ b/tools/accounting/getdelays.c @@ -136,7 +136,7 @@ static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid, msg.g.version = 0x1; na = (struct nlattr *) GENLMSG_DATA(&msg); na->nla_type = nla_type; - na->nla_len = nla_len + 1 + NLA_HDRLEN; + na->nla_len = nla_len + NLA_HDRLEN; memcpy(NLA_DATA(na), nla_data, nla_len); msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len); diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index ff0cc3c17141173560427a45e5d65b8db86e9b1a..1e7c619228a2ee5844bd458b66925ec7c88965c2 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -26,7 +26,7 @@ static void btf_dumper_ptr(const void *data, json_writer_t *jw, bool is_plain_text) { if (is_plain_text) - jsonw_printf(jw, "%p", data); + jsonw_printf(jw, "%p", *(void **)data); else jsonw_printf(jw, "%lu", *(unsigned long *)data); } diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile index 6a73c06e069c9d0eabe52899138a14a06d1a53be..3dbf7e8b07a59887143d66f02812b2bc95f54092 100644 --- a/tools/gpio/Makefile +++ b/tools/gpio/Makefile @@ -35,7 +35,7 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h prepare: $(OUTPUT)include/linux/gpio.h -GPIO_UTILS_IN := $(output)gpio-utils-in.o +GPIO_UTILS_IN := $(OUTPUT)gpio-utils-in.o $(GPIO_UTILS_IN): prepare FORCE $(Q)$(MAKE) $(build)=gpio-utils diff --git a/tools/objtool/check.c b/tools/objtool/check.c index ecf5fc77f50b56dbc8b195b4ecb6a31ed696ae93..4d509734b695b158cc65f606b637f349d37b7993 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -938,10 +938,7 @@ static struct rela *find_switch_table(struct objtool_file *file, * it. */ for (; - &insn->list != &file->insn_list && - insn->sec == func->sec && - insn->offset >= func->offset; - + &insn->list != &file->insn_list && insn->func && insn->func->pfunc == func; insn = insn->first_jump_src ?: list_prev_entry(insn, list)) { if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) @@ -1318,7 +1315,7 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s struct cfi_reg *cfa = &state->cfa; struct stack_op *op = &insn->stack_op; - if (cfa->base != CFI_SP) + if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT) return 0; /* push */ @@ -2089,14 +2086,27 @@ static bool ignore_unreachable_insn(struct instruction *insn) !strcmp(insn->sec->name, ".altinstr_aux")) return true; + if (!insn->func) + return false; + + /* + * CONFIG_UBSAN_TRAP inserts a UD2 when it sees + * __builtin_unreachable(). The BUG() macro has an unreachable() after + * the UD2, which causes GCC's undefined trap logic to emit another UD2 + * (or occasionally a JMP to UD2). 
+ */ + if (list_prev_entry(insn, list)->dead_end && + (insn->type == INSN_BUG || + (insn->type == INSN_JUMP_UNCONDITIONAL && + insn->jump_dest && insn->jump_dest->type == INSN_BUG))) + return true; + /* * Check if this (or a subsequent) instruction is related to * CONFIG_UBSAN or CONFIG_KASAN. * * End the search at 5 instructions to avoid going into the weeds. */ - if (!insn->func) - return false; for (i = 0; i < 5; i++) { if (is_kasan_insn(insn) || is_ubsan_insn(insn)) diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c index faa444270ee3ab297933504b703e4628321087ae..1a3e774941f8e60cfede635662e0ed46d9256607 100644 --- a/tools/objtool/orc_dump.c +++ b/tools/objtool/orc_dump.c @@ -78,7 +78,7 @@ int orc_dump(const char *_objname) char *name; size_t nr_sections; Elf64_Addr orc_ip_addr = 0; - size_t shstrtab_idx; + size_t shstrtab_idx, strtab_idx = 0; Elf *elf; Elf_Scn *scn; GElf_Shdr sh; @@ -139,6 +139,8 @@ int orc_dump(const char *_objname) if (!strcmp(name, ".symtab")) { symtab = data; + } else if (!strcmp(name, ".strtab")) { + strtab_idx = i; } else if (!strcmp(name, ".orc_unwind")) { orc = data->d_buf; orc_size = sh.sh_size; @@ -150,7 +152,7 @@ int orc_dump(const char *_objname) } } - if (!symtab || !orc || !orc_ip) + if (!symtab || !strtab_idx || !orc || !orc_ip) return 0; if (orc_size % sizeof(*orc) != 0) { @@ -171,21 +173,29 @@ int orc_dump(const char *_objname) return -1; } - scn = elf_getscn(elf, sym.st_shndx); - if (!scn) { - WARN_ELF("elf_getscn"); - return -1; - } - - if (!gelf_getshdr(scn, &sh)) { - WARN_ELF("gelf_getshdr"); - return -1; - } - - name = elf_strptr(elf, shstrtab_idx, sh.sh_name); - if (!name || !*name) { - WARN_ELF("elf_strptr"); - return -1; + if (GELF_ST_TYPE(sym.st_info) == STT_SECTION) { + scn = elf_getscn(elf, sym.st_shndx); + if (!scn) { + WARN_ELF("elf_getscn"); + return -1; + } + + if (!gelf_getshdr(scn, &sh)) { + WARN_ELF("gelf_getshdr"); + return -1; + } + + name = elf_strptr(elf, shstrtab_idx, sh.sh_name); + if (!name) { + WARN_ELF("elf_strptr"); + return -1; + } + } else { + name = elf_strptr(elf, strtab_idx, sym.st_name); + if (!name) { + WARN_ELF("elf_strptr"); + return -1; + } } printf("%s+%llx:", name, (unsigned long long)rela.r_addend); diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 510caedd73194171d66596a150aaf57f426d7c71..ae0c5bee801403c9c7c320bc2bff07777bb8d3ab 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -205,8 +205,17 @@ strip-libs = $(filter-out -l%,$(1)) PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG)) +# Python 3.8 changed the output of `python-config --ldflags` to not include the +# '-lpythonX.Y' flag unless '--embed' is also passed. 
The feature check for +# libpython fails if that flag is not included in LDFLAGS +ifeq ($(shell $(PYTHON_CONFIG_SQ) --ldflags --embed 2>&1 1>/dev/null; echo $$?), 0) + PYTHON_CONFIG_LDFLAGS := --ldflags --embed +else + PYTHON_CONFIG_LDFLAGS := --ldflags +endif + ifdef PYTHON_CONFIG - PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) + PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) $(PYTHON_CONFIG_LDFLAGS) 2>/dev/null) PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 02d123871ef95bc1f62fa233152dcac3afd29b33..2233cf722c69247dad9a101e2b87b6d228ec2c97 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -299,6 +299,10 @@ int *irqs_per_cpu; /* indexed by cpu_num */ void setup_all_buffers(void); +char *sys_lpi_file; +char *sys_lpi_file_sysfs = "/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us"; +char *sys_lpi_file_debugfs = "/sys/kernel/debug/pmc_core/slp_s0_residency_usec"; + int cpu_is_not_present(int cpu) { return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set); @@ -2844,8 +2848,6 @@ int snapshot_gfx_mhz(void) * * record snapshot of * /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us - * - * return 1 if config change requires a restart, else return 0 */ int snapshot_cpu_lpi_us(void) { @@ -2865,17 +2867,14 @@ int snapshot_cpu_lpi_us(void) /* * snapshot_sys_lpi() * - * record snapshot of - * /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us - * - * return 1 if config change requires a restart, else return 0 + * record snapshot of sys_lpi_file */ int snapshot_sys_lpi_us(void) { FILE *fp; int retval; - fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r"); + fp = fopen_or_die(sys_lpi_file, "r"); retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us); if (retval != 1) @@ -4743,10 +4742,16 @@ void process_cpuid() else BIC_NOT_PRESENT(BIC_CPU_LPI); - if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", R_OK)) + if (!access(sys_lpi_file_sysfs, R_OK)) { + sys_lpi_file = sys_lpi_file_sysfs; BIC_PRESENT(BIC_SYS_LPI); - else + } else if (!access(sys_lpi_file_debugfs, R_OK)) { + sys_lpi_file = sys_lpi_file_debugfs; + BIC_PRESENT(BIC_SYS_LPI); + } else { + sys_lpi_file_sysfs = NULL; BIC_NOT_PRESENT(BIC_SYS_LPI); + } if (!quiet) decode_misc_feature_control(); @@ -5144,9 +5149,9 @@ int add_counter(unsigned int msr_num, char *path, char *name, } msrp->msr_num = msr_num; - strncpy(msrp->name, name, NAME_BYTES); + strncpy(msrp->name, name, NAME_BYTES - 1); if (path) - strncpy(msrp->path, path, PATH_BYTES); + strncpy(msrp->path, path, PATH_BYTES - 1); msrp->width = width; msrp->type = type; msrp->format = format; diff --git a/tools/testing/selftests/filesystems/incfs/Makefile b/tools/testing/selftests/filesystems/incfs/Makefile index 1f13573d36179148e00b20a1e7aa1f8ec3d0263d..5b2e627ce883c8c39de138b19dd0eeea79c7e73b 100644 --- a/tools/testing/selftests/filesystems/incfs/Makefile +++ b/tools/testing/selftests/filesystems/incfs/Makefile @@ -1,18 +1,11 @@ # SPDX-License-Identifier: GPL-2.0 -CFLAGS += -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -lssl -lcrypto -llz4 -CFLAGS += -I../../../../../usr/include/ -CFLAGS += -I../../../../include/uapi/ -CFLAGS += 
-I../../../../lib +CFLAGS += -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall +CFLAGS += -I../.. -I../../../../.. +LDLIBS := -llz4 -lcrypto EXTRA_SOURCES := utils.c -CFLAGS += $(EXTRA_SOURCES) - TEST_GEN_PROGS := incfs_test -include ../../lib.mk - -$(OUTPUT)incfs_test: incfs_test.c $(EXTRA_SOURCES) -all: $(OUTPUT)incfs_test +$(TEST_GEN_PROGS): $(EXTRA_SOURCES) -clean: - rm -rf $(OUTPUT)incfs_test *.o +include ../../lib.mk diff --git a/tools/testing/selftests/filesystems/incfs/config b/tools/testing/selftests/filesystems/incfs/config deleted file mode 100644 index b6749837a31878434de0f4a44e09ae34c55563f6..0000000000000000000000000000000000000000 --- a/tools/testing/selftests/filesystems/incfs/config +++ /dev/null @@ -1 +0,0 @@ -CONFIG_INCREMENTAL_FS=y \ No newline at end of file diff --git a/tools/testing/selftests/filesystems/incfs/incfs_test.c b/tools/testing/selftests/filesystems/incfs/incfs_test.c index f9661a9eb3fa81ca73119aa80ee33ec702aa3b9f..6809399eac97b0f179bd9b827acf383a6f53ab40 100644 --- a/tools/testing/selftests/filesystems/incfs/incfs_test.c +++ b/tools/testing/selftests/filesystems/incfs/incfs_test.c @@ -2,27 +2,29 @@ /* * Copyright 2018 Google LLC */ -#include -#include -#include +#include #include -#include +#include #include +#include +#include +#include +#include +#include +#include +#include + #include -#include +#include +#include #include #include -#include -#include -#include -#include -#include + #include #include -#include "../../kselftest.h" +#include -#include "lz4.h" #include "utils.h" #define TEST_FAILURE 1 @@ -208,7 +210,7 @@ int open_file_by_id(const char *mnt_dir, incfs_uuid_t id, bool use_ioctl) { char *path = get_index_filename(mnt_dir, id); int cmd_fd = open_commands_file(mnt_dir); - int fd = open(path, O_RDWR); + int fd = open(path, O_RDWR | O_CLOEXEC); struct incfs_permit_fill permit_fill = { .file_descriptor = fd, }; @@ -281,7 +283,7 @@ static int emit_test_blocks(char *mnt_dir, struct test_file *file, .fill_blocks = ptr_to_u64(block_buf), }; ssize_t write_res = 0; - int fd; + int fd = -1; int error = 0; int i = 0; int blocks_written = 0; @@ -444,7 +446,7 @@ static loff_t read_whole_file(char *filename) loff_t bytes_read = 0; uint8_t buff[16 * 1024]; - fd = open(filename, O_RDONLY); + fd = open(filename, O_RDONLY | O_CLOEXEC); if (fd <= 0) return fd; @@ -476,7 +478,7 @@ static int read_test_file(uint8_t *buf, size_t len, char *filename, size_t bytes_to_read = len; off_t offset = ((off_t)block_idx) * INCFS_DATA_FILE_BLOCK_SIZE; - fd = open(filename, O_RDONLY); + fd = open(filename, O_RDONLY | O_CLOEXEC); if (fd <= 0) return fd; @@ -909,7 +911,7 @@ static bool iterate_directory(char *dir_to_iterate, bool root, int file_count) int i; /* Test directory iteration */ - int fd = open(dir_to_iterate, O_RDONLY | O_DIRECTORY); + int fd = open(dir_to_iterate, O_RDONLY | O_DIRECTORY | O_CLOEXEC); if (fd < 0) { print_error("Can't open directory\n"); @@ -1110,7 +1112,7 @@ static int basic_file_ops_test(char *mount_dir) char *path = concat_file_name(mount_dir, file->name); int fd; - fd = open(path, O_RDWR); + fd = open(path, O_RDWR | O_CLOEXEC); free(path); if (fd <= 0) { print_error("Can't open file"); @@ -1930,37 +1932,52 @@ static int hash_tree_test(char *mount_dir) return TEST_FAILURE; } +enum expected_log { FULL_LOG, NO_LOG, PARTIAL_LOG }; + static int validate_logs(char *mount_dir, int log_fd, struct test_file *file, - bool no_rlog) + enum expected_log expected_log) { uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE]; - struct incfs_pending_read_info prs[100] = {}; + struct 
incfs_pending_read_info prs[2048] = {}; int prs_size = ARRAY_SIZE(prs); int block_cnt = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE; + int expected_read_block_cnt; int res; int read_count; - int i; + int i, j; char *filename = concat_file_name(mount_dir, file->name); int fd; - fd = open(filename, O_RDONLY); + fd = open(filename, O_RDONLY | O_CLOEXEC); free(filename); if (fd <= 0) return TEST_FAILURE; if (block_cnt > prs_size) block_cnt = prs_size; + expected_read_block_cnt = block_cnt; for (i = 0; i < block_cnt; i++) { res = pread(fd, data, sizeof(data), INCFS_DATA_FILE_BLOCK_SIZE * i); + + /* Make some read logs of type SAME_FILE_NEXT_BLOCK */ + if (i % 10 == 0) + usleep(20000); + + /* Skip some blocks to make logs of type SAME_FILE */ + if (i % 10 == 5) { + ++i; + --expected_read_block_cnt; + } + if (res <= 0) goto failure; } - read_count = - wait_for_pending_reads(log_fd, no_rlog ? 10 : 0, prs, prs_size); - if (no_rlog) { + read_count = wait_for_pending_reads( + log_fd, expected_log == NO_LOG ? 10 : 0, prs, prs_size); + if (expected_log == NO_LOG) { if (read_count == 0) goto success; if (read_count < 0) @@ -1977,14 +1994,26 @@ static int validate_logs(char *mount_dir, int log_fd, struct test_file *file, goto failure; } - if (read_count != block_cnt) { + i = 0; + if (expected_log == PARTIAL_LOG) { + if (read_count == 0) { + ksft_print_msg("No logs %s.\n", file->name); + goto failure; + } + + for (i = 0, j = 0; j < expected_read_block_cnt - read_count; + i++, j++) + if (i % 10 == 5) + ++i; + + } else if (read_count != expected_read_block_cnt) { ksft_print_msg("Bad log read count %s %d %d.\n", file->name, - read_count, block_cnt); + read_count, expected_read_block_cnt); goto failure; } - for (i = 0; i < read_count; i++) { - struct incfs_pending_read_info *read = &prs[i]; + for (j = 0; j < read_count; i++, j++) { + struct incfs_pending_read_info *read = &prs[j]; if (!same_id(&read->file_id, &file->id)) { ksft_print_msg("Bad log read ino %s\n", file->name); @@ -1997,8 +2026,8 @@ static int validate_logs(char *mount_dir, int log_fd, struct test_file *file, goto failure; } - if (i != 0) { - unsigned long psn = prs[i - 1].serial_number; + if (j != 0) { + unsigned long psn = prs[j - 1].serial_number; if (read->serial_number != psn + 1) { ksft_print_msg("Bad log read sn %s %d %d.\n", @@ -2013,6 +2042,9 @@ static int validate_logs(char *mount_dir, int log_fd, struct test_file *file, file->name); goto failure; } + + if (i % 10 == 5) + ++i; } success: @@ -2063,7 +2095,7 @@ static int read_log_test(char *mount_dir) for (i = 0; i < file_num; i++) { struct test_file *file = &test.files[i]; - if (validate_logs(mount_dir, log_fd, file, false)) + if (validate_logs(mount_dir, log_fd, file, FULL_LOG)) goto failure; } @@ -2091,7 +2123,7 @@ static int read_log_test(char *mount_dir) for (i = 0; i < file_num; i++) { struct test_file *file = &test.files[i]; - if (validate_logs(mount_dir, log_fd, file, false)) + if (validate_logs(mount_dir, log_fd, file, FULL_LOG)) goto failure; } @@ -2118,14 +2150,37 @@ static int read_log_test(char *mount_dir) for (i = 0; i < file_num; i++) { struct test_file *file = &test.files[i]; - if (validate_logs(mount_dir, log_fd, file, true)) + if (validate_logs(mount_dir, log_fd, file, NO_LOG)) goto failure; } /* * Remount and check that logs start working again */ - drop_caches = open("/proc/sys/vm/drop_caches", O_WRONLY); + drop_caches = open("/proc/sys/vm/drop_caches", O_WRONLY | O_CLOEXEC); + if (drop_caches == -1) + goto failure; + i = write(drop_caches, "3", 1); + 
close(drop_caches); + if (i != 1) + goto failure; + + if (mount_fs_opt(mount_dir, backing_dir, "readahead=0,rlog_pages=1", + true) != 0) + goto failure; + + /* Validate data again */ + for (i = 0; i < file_num; i++) { + struct test_file *file = &test.files[i]; + + if (validate_logs(mount_dir, log_fd, file, PARTIAL_LOG)) + goto failure; + } + + /* + * Remount and check that logs start working again + */ + drop_caches = open("/proc/sys/vm/drop_caches", O_WRONLY | O_CLOEXEC); if (drop_caches == -1) goto failure; i = write(drop_caches, "3", 1); @@ -2141,7 +2196,7 @@ static int read_log_test(char *mount_dir) for (i = 0; i < file_num; i++) { struct test_file *file = &test.files[i]; - if (validate_logs(mount_dir, log_fd, file, false)) + if (validate_logs(mount_dir, log_fd, file, FULL_LOG)) goto failure; } @@ -2215,7 +2270,7 @@ static int validate_ranges(const char *mount_dir, struct test_file *file) int cmd_fd = -1; struct incfs_permit_fill permit_fill; - fd = open(filename, O_RDONLY); + fd = open(filename, O_RDONLY | O_CLOEXEC); free(filename); if (fd <= 0) return TEST_FAILURE; @@ -2292,8 +2347,6 @@ static int validate_ranges(const char *mount_dir, struct test_file *file) if (fba.start_index >= block_cnt) { if (fba.index_out != fba.start_index) { - printf("Paul: %d, %d\n", (int)fba.index_out, - (int)fba.start_index); error = -EINVAL; goto out; } @@ -2455,7 +2508,7 @@ static int validate_hash_ranges(const char *mount_dir, struct test_file *file) if (file->size <= 4096 / 32 * 4096) return 0; - fd = open(filename, O_RDONLY); + fd = open(filename, O_RDONLY | O_CLOEXEC); free(filename); if (fd <= 0) return TEST_FAILURE; @@ -2556,6 +2609,65 @@ static int get_hash_blocks_test(char *mount_dir) return TEST_FAILURE; } +static int large_file(char *mount_dir) +{ + char *backing_dir; + int cmd_fd = -1; + int i; + int result = TEST_FAILURE; + uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE] = {}; + int block_count = 3LL * 1024 * 1024 * 1024 / INCFS_DATA_FILE_BLOCK_SIZE; + struct incfs_fill_block *block_buf = + calloc(block_count, sizeof(struct incfs_fill_block)); + struct incfs_fill_blocks fill_blocks = { + .count = block_count, + .fill_blocks = ptr_to_u64(block_buf), + }; + incfs_uuid_t id; + int fd; + + backing_dir = create_backing_dir(mount_dir); + if (!backing_dir) + goto failure; + + if (mount_fs_opt(mount_dir, backing_dir, "readahead=0", false) != 0) + goto failure; + + cmd_fd = open_commands_file(mount_dir); + if (cmd_fd < 0) + goto failure; + + if (emit_file(cmd_fd, NULL, "very_large_file", &id, + (uint64_t)block_count * INCFS_DATA_FILE_BLOCK_SIZE, + NULL) < 0) + goto failure; + + for (i = 0; i < block_count; i++) { + block_buf[i].compression = COMPRESSION_NONE; + block_buf[i].block_index = i; + block_buf[i].data_len = INCFS_DATA_FILE_BLOCK_SIZE; + block_buf[i].data = ptr_to_u64(data); + } + + fd = open_file_by_id(mount_dir, id, true); + if (fd < 0) + goto failure; + + if (ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks) != block_count) + goto failure; + + if (emit_file(cmd_fd, NULL, "very_very_large_file", &id, 1LL << 40, + NULL) < 0) + goto failure; + + result = TEST_SUCCESS; + +failure: + close(fd); + close(cmd_fd); + return result; +} + static char *setup_mount_dir() { struct stat st; @@ -2590,7 +2702,7 @@ int main(int argc, char *argv[]) // NOTE - this abuses the concept of randomness - do *not* ever do this // on a machine for production use - the device will think it has good // randomness when it does not. 
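The open() call changed just below is the write-only /dev/urandom descriptor this note refers to. RNDADDTOENTCNT (from <linux/random.h>) only raises the kernel's entropy estimate and requires CAP_SYS_ADMIN; no real randomness is injected, which is exactly why the note warns against doing this outside a test machine. A hypothetical standalone helper for the same trick, with the error handling the test skips:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/random.h>

	/*
	 * Hypothetical helper, not part of the patch: credit 'bits' of entropy
	 * so the pool looks initialized.  Requires CAP_SYS_ADMIN; O_CLOEXEC
	 * keeps the fd from leaking into anything the test spawns.
	 */
	static int credit_fake_entropy(int bits)
	{
		int fd = open("/dev/urandom", O_WRONLY | O_CLOEXEC);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, RNDADDTOENTCNT, &bits);
		close(fd);
		return ret;
	}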
- fd = open("/dev/urandom", O_WRONLY); + fd = open("/dev/urandom", O_WRONLY | O_CLOEXEC); count = 4096; for (int i = 0; i < 128; ++i) ioctl(fd, RNDADDTOENTCNT, &count); @@ -2625,6 +2737,7 @@ int main(int argc, char *argv[]) MAKE_TEST(read_log_test), MAKE_TEST(get_blocks_test), MAKE_TEST(get_hash_blocks_test), + MAKE_TEST(large_file), }; #undef MAKE_TEST @@ -2645,7 +2758,7 @@ int main(int argc, char *argv[]) rmdir(mount_dir); if (fails > 0) - ksft_exit_pass(); + ksft_exit_fail(); else ksft_exit_pass(); return 0; diff --git a/tools/testing/selftests/filesystems/incfs/utils.c b/tools/testing/selftests/filesystems/incfs/utils.c index 545497685d1479472b166e1fd50933f68d311e5a..e194f63ba92224d14a42285b46ff4e4ccac43261 100644 --- a/tools/testing/selftests/filesystems/incfs/utils.c +++ b/tools/testing/selftests/filesystems/incfs/utils.c @@ -2,27 +2,29 @@ /* * Copyright 2018 Google LLC */ -#include -#include #include -#include -#include +#include #include +#include +#include +#include +#include #include + #include #include -#include -#include -#include -#include -#include -#include -#include +#include +#include + #include #include #include "utils.h" +#ifndef __S_IFREG +#define __S_IFREG S_IFREG +#endif + int mount_fs(const char *mount_dir, const char *backing_dir, int read_timeout_ms) { @@ -184,7 +186,7 @@ int open_commands_file(const char *mount_dir) snprintf(cmd_file, ARRAY_SIZE(cmd_file), "%s/%s", mount_dir, INCFS_PENDING_READS_FILENAME); - cmd_fd = open(cmd_file, O_RDONLY); + cmd_fd = open(cmd_file, O_RDONLY | O_CLOEXEC); if (cmd_fd < 0) perror("Can't open commands file"); @@ -197,7 +199,7 @@ int open_log_file(const char *mount_dir) int cmd_fd; snprintf(cmd_file, ARRAY_SIZE(cmd_file), "%s/.log", mount_dir); - cmd_fd = open(cmd_file, O_RDWR); + cmd_fd = open(cmd_file, O_RDWR | O_CLOEXEC); if (cmd_fd < 0) perror("Can't open log file"); return cmd_fd; diff --git a/tools/testing/selftests/filesystems/incfs/utils.h b/tools/testing/selftests/filesystems/incfs/utils.h index 24b43287fcddc3ae1f9ec521cb347fdf1bd48e4e..9af63e4e922c5f2866ac424eec647356f2fd3e9a 100644 --- a/tools/testing/selftests/filesystems/incfs/utils.h +++ b/tools/testing/selftests/filesystems/incfs/utils.h @@ -5,7 +5,7 @@ #include #include -#include "../../include/uapi/linux/incrementalfs.h" +#include #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0])) diff --git a/tools/testing/selftests/ftrace/settings b/tools/testing/selftests/ftrace/settings new file mode 100644 index 0000000000000000000000000000000000000000..e7b9417537fbc4626153b72e8f295ab4594c844b --- /dev/null +++ b/tools/testing/selftests/ftrace/settings @@ -0,0 +1 @@ +timeout=0 diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c index 4c156aeab6b80c011ab6ad503c9521bc10341c7b..5ec4d9e18806cd81b547d5f64d2137af0a3249b5 100644 --- a/tools/testing/selftests/ipc/msgque.c +++ b/tools/testing/selftests/ipc/msgque.c @@ -137,7 +137,7 @@ int dump_queue(struct msgque_data *msgque) for (kern_id = 0; kern_id < 256; kern_id++) { ret = msgctl(kern_id, MSG_STAT, &ds); if (ret < 0) { - if (errno == -EINVAL) + if (errno == EINVAL) continue; printf("Failed to get stats for IPC queue with id %d\n", kern_id); diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh index 0a76314b441494fcc38844c322fcaf1114912021..1f118916a83e4db30baaeb3f7569495bdeb74306 100755 --- a/tools/testing/selftests/kmod/kmod.sh +++ b/tools/testing/selftests/kmod/kmod.sh @@ -505,18 +505,23 @@ function test_num() fi } -function get_test_count() +function 
get_test_data() { test_num $1 - TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}') + local field_num=$(echo $1 | sed 's/^0*//') + echo $ALL_TESTS | awk '{print $'$field_num'}' +} + +function get_test_count() +{ + TEST_DATA=$(get_test_data $1) LAST_TWO=${TEST_DATA#*:*} echo ${LAST_TWO%:*} } function get_test_enabled() { - test_num $1 - TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}') + TEST_DATA=$(get_test_data $1) echo ${TEST_DATA#*:*:} } diff --git a/tools/testing/selftests/vm/mlock2-tests.c b/tools/testing/selftests/vm/mlock2-tests.c index 637b6d0ac0d0bf63d88ff5f5782a65453b486a7a..11b2301f3aa3703dcaa47e42c5c5c5386206a5e1 100644 --- a/tools/testing/selftests/vm/mlock2-tests.c +++ b/tools/testing/selftests/vm/mlock2-tests.c @@ -67,59 +67,6 @@ static int get_vm_area(unsigned long addr, struct vm_boundaries *area) return ret; } -static uint64_t get_pageflags(unsigned long addr) -{ - FILE *file; - uint64_t pfn; - unsigned long offset; - - file = fopen("/proc/self/pagemap", "r"); - if (!file) { - perror("fopen pagemap"); - _exit(1); - } - - offset = addr / getpagesize() * sizeof(pfn); - - if (fseek(file, offset, SEEK_SET)) { - perror("fseek pagemap"); - _exit(1); - } - - if (fread(&pfn, sizeof(pfn), 1, file) != 1) { - perror("fread pagemap"); - _exit(1); - } - - fclose(file); - return pfn; -} - -static uint64_t get_kpageflags(unsigned long pfn) -{ - uint64_t flags; - FILE *file; - - file = fopen("/proc/kpageflags", "r"); - if (!file) { - perror("fopen kpageflags"); - _exit(1); - } - - if (fseek(file, pfn * sizeof(flags), SEEK_SET)) { - perror("fseek kpageflags"); - _exit(1); - } - - if (fread(&flags, sizeof(flags), 1, file) != 1) { - perror("fread kpageflags"); - _exit(1); - } - - fclose(file); - return flags; -} - #define VMFLAGS "VmFlags:" static bool is_vmflag_set(unsigned long addr, const char *vmflag) @@ -159,19 +106,13 @@ static bool is_vmflag_set(unsigned long addr, const char *vmflag) #define RSS "Rss:" #define LOCKED "lo" -static bool is_vma_lock_on_fault(unsigned long addr) +static unsigned long get_value_for_name(unsigned long addr, const char *name) { - bool ret = false; - bool locked; - FILE *smaps = NULL; - unsigned long vma_size, vma_rss; char *line = NULL; - char *value; size_t size = 0; - - locked = is_vmflag_set(addr, LOCKED); - if (!locked) - goto out; + char *value_ptr; + FILE *smaps = NULL; + unsigned long value = -1UL; smaps = seek_to_smaps_entry(addr); if (!smaps) { @@ -180,112 +121,70 @@ static bool is_vma_lock_on_fault(unsigned long addr) } while (getline(&line, &size, smaps) > 0) { - if (!strstr(line, SIZE)) { + if (!strstr(line, name)) { free(line); line = NULL; size = 0; continue; } - value = line + strlen(SIZE); - if (sscanf(value, "%lu kB", &vma_size) < 1) { + value_ptr = line + strlen(name); + if (sscanf(value_ptr, "%lu kB", &value) < 1) { printf("Unable to parse smaps entry for Size\n"); goto out; } break; } - while (getline(&line, &size, smaps) > 0) { - if (!strstr(line, RSS)) { - free(line); - line = NULL; - size = 0; - continue; - } - - value = line + strlen(RSS); - if (sscanf(value, "%lu kB", &vma_rss) < 1) { - printf("Unable to parse smaps entry for Rss\n"); - goto out; - } - break; - } - - ret = locked && (vma_rss < vma_size); out: - free(line); if (smaps) fclose(smaps); - return ret; + free(line); + return value; } -#define PRESENT_BIT 0x8000000000000000ULL -#define PFN_MASK 0x007FFFFFFFFFFFFFULL -#define UNEVICTABLE_BIT (1UL << 18) - -static int lock_check(char *map) +static bool is_vma_lock_on_fault(unsigned long addr) { - unsigned long page_size = 
getpagesize(); - uint64_t page1_flags, page2_flags; + bool locked; + unsigned long vma_size, vma_rss; - page1_flags = get_pageflags((unsigned long)map); - page2_flags = get_pageflags((unsigned long)map + page_size); + locked = is_vmflag_set(addr, LOCKED); + if (!locked) + return false; - /* Both pages should be present */ - if (((page1_flags & PRESENT_BIT) == 0) || - ((page2_flags & PRESENT_BIT) == 0)) { - printf("Failed to make both pages present\n"); - return 1; - } + vma_size = get_value_for_name(addr, SIZE); + vma_rss = get_value_for_name(addr, RSS); - page1_flags = get_kpageflags(page1_flags & PFN_MASK); - page2_flags = get_kpageflags(page2_flags & PFN_MASK); + /* only one page is faulted in */ + return (vma_rss < vma_size); +} - /* Both pages should be unevictable */ - if (((page1_flags & UNEVICTABLE_BIT) == 0) || - ((page2_flags & UNEVICTABLE_BIT) == 0)) { - printf("Failed to make both pages unevictable\n"); - return 1; - } +#define PRESENT_BIT 0x8000000000000000ULL +#define PFN_MASK 0x007FFFFFFFFFFFFFULL +#define UNEVICTABLE_BIT (1UL << 18) - if (!is_vmflag_set((unsigned long)map, LOCKED)) { - printf("VMA flag %s is missing on page 1\n", LOCKED); - return 1; - } +static int lock_check(unsigned long addr) +{ + bool locked; + unsigned long vma_size, vma_rss; - if (!is_vmflag_set((unsigned long)map + page_size, LOCKED)) { - printf("VMA flag %s is missing on page 2\n", LOCKED); - return 1; - } + locked = is_vmflag_set(addr, LOCKED); + if (!locked) + return false; - return 0; + vma_size = get_value_for_name(addr, SIZE); + vma_rss = get_value_for_name(addr, RSS); + + return (vma_rss == vma_size); } static int unlock_lock_check(char *map) { - unsigned long page_size = getpagesize(); - uint64_t page1_flags, page2_flags; - - page1_flags = get_pageflags((unsigned long)map); - page2_flags = get_pageflags((unsigned long)map + page_size); - page1_flags = get_kpageflags(page1_flags & PFN_MASK); - page2_flags = get_kpageflags(page2_flags & PFN_MASK); - - if ((page1_flags & UNEVICTABLE_BIT) || (page2_flags & UNEVICTABLE_BIT)) { - printf("A page is still marked unevictable after unlock\n"); - return 1; - } - if (is_vmflag_set((unsigned long)map, LOCKED)) { printf("VMA flag %s is present on page 1 after unlock\n", LOCKED); return 1; } - if (is_vmflag_set((unsigned long)map + page_size, LOCKED)) { - printf("VMA flag %s is present on page 2 after unlock\n", LOCKED); - return 1; - } - return 0; } @@ -311,7 +210,7 @@ static int test_mlock_lock() goto unmap; } - if (lock_check(map)) + if (!lock_check((unsigned long)map)) goto unmap; /* Now unlock and recheck attributes */ @@ -330,64 +229,18 @@ static int test_mlock_lock() static int onfault_check(char *map) { - unsigned long page_size = getpagesize(); - uint64_t page1_flags, page2_flags; - - page1_flags = get_pageflags((unsigned long)map); - page2_flags = get_pageflags((unsigned long)map + page_size); - - /* Neither page should be present */ - if ((page1_flags & PRESENT_BIT) || (page2_flags & PRESENT_BIT)) { - printf("Pages were made present by MLOCK_ONFAULT\n"); - return 1; - } - *map = 'a'; - page1_flags = get_pageflags((unsigned long)map); - page2_flags = get_pageflags((unsigned long)map + page_size); - - /* Only page 1 should be present */ - if ((page1_flags & PRESENT_BIT) == 0) { - printf("Page 1 is not present after fault\n"); - return 1; - } else if (page2_flags & PRESENT_BIT) { - printf("Page 2 was made present\n"); - return 1; - } - - page1_flags = get_kpageflags(page1_flags & PFN_MASK); - - /* Page 1 should be unevictable */ - if ((page1_flags 
& UNEVICTABLE_BIT) == 0) { - printf("Failed to make faulted page unevictable\n"); - return 1; - } - if (!is_vma_lock_on_fault((unsigned long)map)) { printf("VMA is not marked for lock on fault\n"); return 1; } - if (!is_vma_lock_on_fault((unsigned long)map + page_size)) { - printf("VMA is not marked for lock on fault\n"); - return 1; - } - return 0; } static int unlock_onfault_check(char *map) { unsigned long page_size = getpagesize(); - uint64_t page1_flags; - - page1_flags = get_pageflags((unsigned long)map); - page1_flags = get_kpageflags(page1_flags & PFN_MASK); - - if (page1_flags & UNEVICTABLE_BIT) { - printf("Page 1 is still marked unevictable after unlock\n"); - return 1; - } if (is_vma_lock_on_fault((unsigned long)map) || is_vma_lock_on_fault((unsigned long)map + page_size)) { @@ -445,7 +298,6 @@ static int test_lock_onfault_of_present() char *map; int ret = 1; unsigned long page_size = getpagesize(); - uint64_t page1_flags, page2_flags; map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); @@ -465,17 +317,6 @@ static int test_lock_onfault_of_present() goto unmap; } - page1_flags = get_pageflags((unsigned long)map); - page2_flags = get_pageflags((unsigned long)map + page_size); - page1_flags = get_kpageflags(page1_flags & PFN_MASK); - page2_flags = get_kpageflags(page2_flags & PFN_MASK); - - /* Page 1 should be unevictable */ - if ((page1_flags & UNEVICTABLE_BIT) == 0) { - printf("Failed to make present page unevictable\n"); - goto unmap; - } - if (!is_vma_lock_on_fault((unsigned long)map) || !is_vma_lock_on_fault((unsigned long)map + page_size)) { printf("VMA with present pages is not marked lock on fault\n"); @@ -507,7 +348,7 @@ static int test_munlockall() goto out; } - if (lock_check(map)) + if (!lock_check((unsigned long)map)) goto unmap; if (munlockall()) { @@ -549,7 +390,7 @@ static int test_munlockall() goto out; } - if (lock_check(map)) + if (!lock_check((unsigned long)map)) goto unmap; if (munlockall()) { diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c index 6f22238f32173db4ef4d710666807cb5397f7dc0..12aaa063196e7406a08188a64570d46ce407ec91 100644 --- a/tools/testing/selftests/x86/ptrace_syscall.c +++ b/tools/testing/selftests/x86/ptrace_syscall.c @@ -414,8 +414,12 @@ int main() #if defined(__i386__) && (!defined(__GLIBC__) || __GLIBC__ > 2 || __GLIBC_MINOR__ >= 16) vsyscall32 = (void *)getauxval(AT_SYSINFO); - printf("[RUN]\tCheck AT_SYSINFO return regs\n"); - test_sys32_regs(do_full_vsyscall32); + if (vsyscall32) { + printf("[RUN]\tCheck AT_SYSINFO return regs\n"); + test_sys32_regs(do_full_vsyscall32); + } else { + printf("[SKIP]\tAT_SYSINFO is not available\n"); + } #endif test_ptrace_syscall_restart(); diff --git a/tools/vm/Makefile b/tools/vm/Makefile index 20f6cf04377f0c257aeb4bacb3133b6a35e9e9b6..9860622cbb151126571ef0962cdab5735c59c5dd 100644 --- a/tools/vm/Makefile +++ b/tools/vm/Makefile @@ -1,6 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for vm tools # +include ../scripts/Makefile.include + TARGETS=page-types slabinfo page_owner_sort LIB_DIR = ../lib/api diff --git a/usr/Kconfig b/usr/Kconfig index 43658b8a975e57a8d8b98ce0c1fd78e7e810afe3..8b4826de1189f9cdb1f73b412aec078829974138 100644 --- a/usr/Kconfig +++ b/usr/Kconfig @@ -131,17 +131,6 @@ choice If in doubt, select 'None' -config INITRAMFS_COMPRESSION_NONE - bool "None" - help - Do not compress the built-in initramfs at all. 
This may sound wasteful - in space, but, you should be aware that the built-in initramfs will be - compressed at a later stage anyways along with the rest of the kernel, - on those architectures that support this. However, not compressing the - initramfs may lead to slightly higher memory consumption during a - short time at boot, while both the cpio image and the unpacked - filesystem image will be present in memory simultaneously - config INITRAMFS_COMPRESSION_GZIP bool "Gzip" depends on RD_GZIP @@ -214,6 +203,17 @@ config INITRAMFS_COMPRESSION_LZ4 If you choose this, keep in mind that most distros don't provide lz4 by default which could cause a build failure. +config INITRAMFS_COMPRESSION_NONE + bool "None" + help + Do not compress the built-in initramfs at all. This may sound wasteful + in space, but, you should be aware that the built-in initramfs will be + compressed at a later stage anyways along with the rest of the kernel, + on those architectures that support this. However, not compressing the + initramfs may lead to slightly higher memory consumption during a + short time at boot, while both the cpio image and the unpacked + filesystem image will be present in memory simultaneously + endchoice config INITRAMFS_COMPRESSION diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c index d31f267961e75a956174964e445338a74abc757b..25c0e47d57cbe314ff9b993c4456c0d8fd65a0d1 100644 --- a/virt/kvm/arm/hyp/aarch32.c +++ b/virt/kvm/arm/hyp/aarch32.c @@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu) */ void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) { + u32 pc = *vcpu_pc(vcpu); bool is_thumb; is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT); if (is_thumb && !is_wide_instr) - *vcpu_pc(vcpu) += 2; + pc += 2; else - *vcpu_pc(vcpu) += 4; + pc += 4; + + *vcpu_pc(vcpu) = pc; + kvm_adjust_itstate(vcpu); } diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 762f81900529eef4051d42517aa117875927d565..9d06a1f8e6c0ade065266f5c535eec304390a09c 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -381,7 +381,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid) { if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || - intid > VGIC_NR_PRIVATE_IRQS) + intid >= VGIC_NR_PRIVATE_IRQS) kvm_arm_halt_guest(vcpu->kvm); } @@ -389,7 +389,7 @@ static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid) static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid) { if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || - intid > VGIC_NR_PRIVATE_IRQS) + intid >= VGIC_NR_PRIVATE_IRQS) kvm_arm_resume_guest(vcpu->kvm); } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 4e499b78569b7a7f2236830914c6f184349985e0..aca15bd1cc4cd629bb307c7d956f7ffea4330f8b 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -52,9 +52,9 @@ #include #include #include +#include #include -#include #include #include #include @@ -1705,6 +1705,153 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) } EXPORT_SYMBOL_GPL(gfn_to_page); +void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache) +{ + if (pfn == 0) + return; + + if (cache) + cache->pfn = cache->gfn = 0; + + if (dirty) + kvm_release_pfn_dirty(pfn); + else + kvm_release_pfn_clean(pfn); +} + +static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t 
gfn, + struct gfn_to_pfn_cache *cache, u64 gen) +{ + kvm_release_pfn(cache->pfn, cache->dirty, cache); + + cache->pfn = gfn_to_pfn_memslot(slot, gfn); + cache->gfn = gfn; + cache->dirty = false; + cache->generation = gen; +} + +static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn, + struct kvm_host_map *map, + struct gfn_to_pfn_cache *cache, + bool atomic) +{ + kvm_pfn_t pfn; + void *hva = NULL; + struct page *page = KVM_UNMAPPED_PAGE; + struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn); + u64 gen = slots->generation; + + if (!map) + return -EINVAL; + + if (cache) { + if (!cache->pfn || cache->gfn != gfn || + cache->generation != gen) { + if (atomic) + return -EAGAIN; + kvm_cache_gfn_to_pfn(slot, gfn, cache, gen); + } + pfn = cache->pfn; + } else { + if (atomic) + return -EAGAIN; + pfn = gfn_to_pfn_memslot(slot, gfn); + } + if (is_error_noslot_pfn(pfn)) + return -EINVAL; + + if (pfn_valid(pfn)) { + page = pfn_to_page(pfn); + if (atomic) + hva = kmap_atomic(page); + else + hva = kmap(page); +#ifdef CONFIG_HAS_IOMEM + } else if (!atomic) { + hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); + } else { + return -EINVAL; +#endif + } + + if (!hva) + return -EFAULT; + + map->page = page; + map->hva = hva; + map->pfn = pfn; + map->gfn = gfn; + + return 0; +} + +int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, + struct gfn_to_pfn_cache *cache, bool atomic) +{ + return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map, + cache, atomic); +} +EXPORT_SYMBOL_GPL(kvm_map_gfn); + +int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) +{ + return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map, + NULL, false); +} +EXPORT_SYMBOL_GPL(kvm_vcpu_map); + +static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot, + struct kvm_host_map *map, + struct gfn_to_pfn_cache *cache, + bool dirty, bool atomic) +{ + if (!map) + return; + + if (!map->hva) + return; + + if (map->page != KVM_UNMAPPED_PAGE) { + if (atomic) + kunmap_atomic(map->hva); + else + kunmap(map->page); + } +#ifdef CONFIG_HAS_IOMEM + else if (!atomic) + memunmap(map->hva); + else + WARN_ONCE(1, "Unexpected unmapping in atomic context"); +#endif + + if (dirty) + mark_page_dirty_in_slot(memslot, map->gfn); + + if (cache) + cache->dirty |= dirty; + else + kvm_release_pfn(map->pfn, dirty, NULL); + + map->hva = NULL; + map->page = NULL; +} + +int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, + struct gfn_to_pfn_cache *cache, bool dirty, bool atomic) +{ + __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, + cache, dirty, atomic); + return 0; +} +EXPORT_SYMBOL_GPL(kvm_unmap_gfn); + +void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) +{ + __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL, + dirty, false); +} +EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); + struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) { kvm_pfn_t pfn;
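The kvm_host_map helpers introduced above let a caller map a guest frame into the host, either directly or through a gfn_to_pfn_cache that is refreshed whenever the memslot generation changes, and release it with the right dirty accounting. A minimal sketch of how the non-cached vcpu variants might be used; the function below is hypothetical (real callers sit in arch code) and assumes offset stays below PAGE_SIZE:

	/*
	 * Hypothetical caller of kvm_vcpu_map()/kvm_vcpu_unmap(): map one guest
	 * frame, write a byte through the host mapping, then unmap it as dirty
	 * so mark_page_dirty_in_slot() keeps the dirty log correct.
	 */
	static int poke_guest_byte(struct kvm_vcpu *vcpu, gfn_t gfn,
				   unsigned int offset, u8 val)
	{
		struct kvm_host_map map;

		if (kvm_vcpu_map(vcpu, gfn, &map))
			return -EFAULT;	/* no memslot, or pfn lookup failed */

		((u8 *)map.hva)[offset] = val;

		/* dirty == true ends in kvm_release_pfn_dirty() for this pfn. */
		kvm_vcpu_unmap(vcpu, &map, true);
		return 0;
	}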